comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
You can make it package-private so that your tests can leverage this constant. | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | List<String> checkpointJsonList = jedis.hmget(member, "checkpoint"); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
ObjectSerializer serializer = DEFAULT_SERIALIZER;
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, "checkpoint", new String(serializer.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
According to the documentation, a null value is returned if the field variable does not exist. You could be deserializing a null. https://redis.io/commands/hmget/ | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | checkpointJson = checkpointJsonList.get(0); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
ObjectSerializer serializer = DEFAULT_SERIALIZER;
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, "checkpoint", new String(serializer.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
You can use Flux.fromIterable rather than wrapping it in a stream again. Creating a list just to temporarily hold these results seems like a lot of overhead... is it possible to use Flux.create? | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | return Flux.fromStream(list.stream()); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
ObjectSerializer serializer = DEFAULT_SERIALIZER;
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, "checkpoint", new String(serializer.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
// Returns true when the checkpoint is non-null and carries at least one of
// offset / sequence number. Primitive boolean: the value is only used in
// conditions, so the Boolean wrapper just forced pointless autoboxing.
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
Do we need to assign this to a local variable? | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | ObjectSerializer serializer = DEFAULT_SERIALIZER; | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Package-private: instances are expected to be created by the library/tests.
// NOTE(review): jedisPool is not null-checked -- presumably guaranteed by the
// caller; confirm or add an explicit null check.
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // TODO: ownership claiming is not implemented yet; the requested list is
    // ignored. Returning an empty Flux keeps the Reactor contract -- the
    // previous `return null` would NPE every subscriber downstream.
    return Flux.empty();
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    // Lists every checkpoint persisted for this consumer group. A group with
    // no members (or a member without a stored checkpoint yet) is a normal
    // situation and yields an empty/partial result -- the previous
    // Flux.error(IllegalArgumentException / NoSuchElementException) turned it
    // into a failure callers could not distinguish from real errors.
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        List<Checkpoint> checkpoints = new ArrayList<>();
        for (String member : jedis.smembers(prefix)) {
            List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
            // HMGET returns a null slot (or nothing) when the field is absent.
            if (checkpointJsonList.isEmpty() || checkpointJsonList.get(0) == null) {
                continue;
            }
            // Explicit UTF-8: new String(bytes)/getBytes() without a charset
            // depend on the platform default.
            checkpoints.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                checkpointJsonList.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(Checkpoint.class)));
        }
        // No returnResource(...): try-with-resources close() already returns
        // the instance to the pool.
        return Flux.fromIterable(checkpoints);
    }
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    // Persists (creates or overwrites) the checkpoint of one partition.
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        // Always write: the exists() guard made updates to an already-stored
        // checkpoint silent no-ops. SADD is idempotent; HSET overwrites.
        jedis.sadd(prefix, key);
        jedis.hset(key, "checkpoint", new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        // returnResource(...) removed: close() from try-with-resources already
        // returns the instance to the pool.
    }
    // Mono.empty() instead of null so subscribers receive a completion signal.
    return Mono.empty();
}
// Redis set key grouping every partition of one consumer group:
// "<namespace>/<eventHub>/<consumerGroup>". Static: it reads no instance state.
private static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
// Redis hash key for a single partition: "<prefix>/<partitionId>".
// Static: it reads no instance state.
private static String keyBuilder(String prefix, String partitionId) {
    return prefix + "/" + partitionId;
}
// True when the checkpoint is non-null and has an offset or a sequence number.
// Static (no instance state) and primitive boolean (avoids autoboxing).
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
// Package-private constructor; the pool supplies one Jedis connection per call.
// NOTE(review): jedisPool is not null-checked -- confirm the caller guarantees it.
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
// Stub: no ownership is ever claimed; the requested list is ignored and an
// empty Flux is returned. NOTE(review): presumably a placeholder until load
// balancing is implemented -- confirm before relying on this store for it.
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    // Lists every checkpoint stored for this consumer group; an empty group is
    // a normal condition and completes with an empty Flux.
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        List<Checkpoint> listStoredCheckpoints = new ArrayList<>();
        // Each member of the prefix set is the hash key of one partition.
        for (String member : jedis.smembers(prefix)) {
            List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
            if (checkpointJsonList.isEmpty() || checkpointJsonList.get(0) == null) {
                // HMGET yields a null slot (or nothing) when the field is absent.
                LOGGER.verbose("No checkpoint persists yet.");
                continue;
            }
            Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(
                checkpointJsonList.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(Checkpoint.class));
            listStoredCheckpoints.add(checkpoint);
        }
        // The explicit returnResource(...) calls were removed: close() from
        // try-with-resources already returns the Jedis instance to the pool,
        // so calling both returned the same resource twice.
        return Flux.fromIterable(listStoredCheckpoints);
    }
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    // Creates or overwrites the stored checkpoint of one partition.
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        // Unconditional write: the previous exists() guard meant an existing
        // checkpoint was never updated. SADD is idempotent; HSET overwrites.
        jedis.sadd(prefix, key);
        jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        // returnResource(...) removed -- try-with-resources close() already
        // returns the instance to the pool; calling both double-returns it.
    }
    return Mono.empty();
}
// Redis set key grouping all partitions of one consumer group:
// "<namespace>/<eventHub>/<consumerGroup>". Package-private for test reuse.
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
// Redis hash key for a single partition: "<prefix>/<partitionId>".
// Package-private for test reuse.
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
// True when the checkpoint is non-null and carries an offset or a sequence
// number. Primitive boolean avoids autoboxing on every call.
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
Same comments as the ones above. | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | for (String member : members) { | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Package-private constructor.
// NOTE(review): jedisPool is not null-checked -- confirm the caller guarantees it.
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // TODO: claiming is not implemented; the requested list is ignored.
    // Flux.empty() honors the Reactor contract -- `return null` would NPE
    // every downstream subscriber.
    return Flux.empty();
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    // Returns every persisted checkpoint under this consumer group. A group
    // with no members, or a member without a checkpoint yet, is normal and
    // produces an empty/partial result -- the previous
    // Flux.error(IllegalArgumentException / NoSuchElementException) made those
    // ordinary states look like failures.
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        ArrayList<Checkpoint> storedCheckpoints = new ArrayList<>();
        for (String member : jedis.smembers(prefix)) {
            List<String> fields = jedis.hmget(member, "checkpoint");
            // HMGET yields a null slot (or nothing) when the field is absent.
            if (fields.isEmpty() || fields.get(0) == null) {
                continue;
            }
            // Explicit UTF-8 so the round trip is independent of the platform
            // default charset.
            storedCheckpoints.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                fields.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(Checkpoint.class)));
        }
        // No explicit returnResource: close() from try-with-resources already
        // hands the instance back to the pool.
        return Flux.fromIterable(storedCheckpoints);
    }
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    // Persist (create or overwrite) the checkpoint for one partition.
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        // Always write: guarding on exists() made updates to an existing
        // checkpoint silent no-ops. SADD is idempotent, HSET overwrites.
        jedis.sadd(prefix, key);
        jedis.hset(key, "checkpoint", new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        // returnResource(...) removed: close() from try-with-resources already
        // returns the instance to the pool.
    }
    // Mono.empty() (not null) so subscribers receive a completion signal.
    return Mono.empty();
}
// Redis set key for the consumer group: "<namespace>/<eventHub>/<consumerGroup>".
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
// Redis hash key for one partition: "<prefix>/<partitionId>".
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
// True when the checkpoint is non-null and has an offset or a sequence number.
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
// Package-private constructor; one Jedis connection is borrowed per operation.
// NOTE(review): jedisPool is not null-checked -- confirm the caller guarantees it.
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
// Stub: ignores the requested ownerships and returns an empty Flux.
// NOTE(review): presumably a placeholder until load balancing lands -- confirm.
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
// Lists every checkpoint stored for this consumer group; an empty group
// completes with an empty Flux rather than an error.
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
// NOTE(review): returnResource plus the implicit close() from
// try-with-resources returns the same Jedis instance to the pool twice;
// consider dropping the explicit call (it is deprecated).
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
// Each member of the prefix set is the hash key of one partition.
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
// HMGET returns a null slot when the field is missing.
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
// Fail fast when the checkpoint carries no position (offset/sequence number).
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
// NOTE(review): this writes only when prefix/key are absent, so updates to an
// already-stored checkpoint are dropped -- confirm that is intended for a
// method named "update".
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
// NOTE(review): try-with-resources close() also returns the instance to the
// pool; the explicit (deprecated) returnResource double-returns it.
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
// Redis set key grouping all partitions of one consumer group:
// "<namespace>/<eventHub>/<consumerGroup>". Package-private for test reuse.
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
// Redis hash key for one partition: "<prefix>/<partitionId>".
// Package-private for test reuse.
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
// True when the checkpoint is non-null and has an offset or a sequence number.
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
`1L` will also work. | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenReturn(list);
StepVerifier.create(store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(checkpointTest -> {
assertEquals("fullyQualifiedNamespace", checkpointTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", checkpointTest.getEventHubName());
assertEquals("consumerGroup", checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | .setSequenceNumber((long) 1); | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
@BeforeEach
// Fresh mocks per test so stubbing never leaks between cases.
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
// Expects listCheckpoints to surface an IllegalArgumentException when no
// members exist for the group.
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
// NOTE(review): the next line is truncated mid-statement in this dump -- the
// smembers key argument and its thenReturn(...) stub are missing. Also,
// listCheckpoints returns Flux.error(...) rather than throwing, so verify
// assertThrows is really the intended assertion here.
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listCheckpoints("", "", ""));
}
@Test
// A member key is listed in the prefix set, but its hash lookup throws: the
// exception propagates out of listCheckpoints (the hmget runs eagerly).
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
// NOTE(review): the member says "eventHubNamespace" while smembers is stubbed
// with "eventHubName" -- the mock still works (hmget is stubbed with the same
// member string), but confirm the asymmetry is intentional.
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
// Happy path: one serialized ownership record stored under the group's set is
// deserialized back and emitted by listOwnership.
public void testListOwnership() {
    PartitionOwnership storedOwnership = new PartitionOwnership()
        .setFullyQualifiedNamespace("fullyQualifiedNamespace")
        .setEventHubName("eventHubName")
        .setConsumerGroup("consumerGroup")
        .setPartitionId("one")
        .setOwnerId("ownerOne")
        .setETag("eTag");
    byte[] serialized = jsonSerializer.serializeToBytes(storedOwnership);
    // Member set and the JSON payload the mocked Jedis hands back.
    Set<String> memberKeys = new HashSet<>();
    memberKeys.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
    List<String> storedJson = new ArrayList<>();
    storedJson.add(new String(serialized));
    when(jedisPool.getResource()).thenReturn(jedis);
    when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(memberKeys);
    when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
        eq("partitionOwnership"))).thenReturn(storedJson);
    StepVerifier.create(store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
        .assertNext(actual -> {
            assertEquals("fullyQualifiedNamespace", actual.getFullyQualifiedNamespace());
            assertEquals("eventHubName", actual.getEventHubName());
            assertEquals("consumerGroup", actual.getConsumerGroup());
            assertEquals("ownerOne", actual.getOwnerId());
        })
        .verifyComplete();
}
@Test
// Expects an IllegalArgumentException when the group has no members.
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
// NOTE(review): the next line is truncated mid-statement in this dump -- the
// smembers key argument and its thenReturn(...) stub are missing. listOwnership
// also returns Flux.error(...) rather than throwing; verify the assertion.
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listOwnership("", "", ""));
}
@Test
// The member's hmget throws eagerly inside listOwnership, so the exception
// escapes the method call itself.
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
// NOTE(review): "eventHubNamespace" vs "eventHubName" mismatch between member
// and smembers stub -- works with these mocks, but confirm it is intentional.
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
// Pins the current stub behavior: claimOwnership returns null.
// NOTE(review): this couples the test to an unimplemented method -- once
// claimOwnership returns a real Flux this assertion must change.
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
Assertions.assertNull(store.claimOwnership(partitionOwnershipList));
}
@Test
// A checkpoint with only a sequence number passes validation; the store's
// updateCheckpoint currently returns null, which assertNull pins.
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(true);
// NOTE(review): asserting null locks in the Mono-contract violation; prefer
// StepVerifier...verifyComplete() once updateCheckpoint returns Mono.empty().
Assertions.assertNull(store.updateCheckpoint(checkpoint));
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
// Fresh mocks per test; the store under test wraps the mocked pool.
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
// An empty member set yields an empty, normally-completing Flux (no error).
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
// A listed member whose checkpoint field is a null HMGET slot is skipped, so
// the stream completes without emitting anything.
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
// Happy path: one serialized ownership record stored under the group's set is
// deserialized back and emitted by listOwnership.
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
// Serialize with the same serializer the store uses, explicit UTF-8.
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
// An empty member set yields an empty, normally-completing Flux (no error).
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
// claimOwnership is a stub returning an empty Flux; the stream just completes.
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
// A checkpoint with only a sequence number satisfies validation, and
// updateCheckpoint completes empty on success.
public void testUpdateCheckpoint() {
    Checkpoint updated = new Checkpoint()
        .setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
        .setEventHubName(EVENT_HUB_NAME)
        .setConsumerGroup(CONSUMER_GROUP)
        .setPartitionId(PARTITION_ID)
        .setSequenceNumber(1L);
    when(jedisPool.getResource()).thenReturn(jedis);
    when(jedis.exists(PREFIX)).thenReturn(true);
    StepVerifier.create(store.updateCheckpoint(updated)).verifyComplete();
}
} |
1. I'd consider specifying the character set. `new String(bytes, StandardCharset.UTF_8)` 2. Since this array has a single item, can we use `Collections.singletonlist` or `Arrays.asList()` to clean up the declarations? | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenReturn(list);
StepVerifier.create(store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(checkpointTest -> {
assertEquals("fullyQualifiedNamespace", checkpointTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", checkpointTest.getEventHubName());
assertEquals("consumerGroup", checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | list.add(new String(bytes)); | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listCheckpoints("", "", ""));
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setEventHubName("eventHubName")
.setConsumerGroup("consumerGroup")
.setPartitionId("one")
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenReturn(list);
StepVerifier.create(store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(partitionOwnershipTest -> {
assertEquals("fullyQualifiedNamespace", partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", partitionOwnershipTest.getEventHubName());
assertEquals("consumerGroup", partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listOwnership("", "", ""));
}
@Test
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
Assertions.assertNull(store.claimOwnership(partitionOwnershipList));
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(true);
Assertions.assertNull(store.updateCheckpoint(checkpoint));
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
It'll be easier to update the test if you specify constants... for example, if you change "one" to "1", now you have to Ctrl+F other possible usages of it. | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenReturn(list);
StepVerifier.create(store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(checkpointTest -> {
assertEquals("fullyQualifiedNamespace", checkpointTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", checkpointTest.getEventHubName());
assertEquals("consumerGroup", checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"); | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listCheckpoints("", "", ""));
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setEventHubName("eventHubName")
.setConsumerGroup("consumerGroup")
.setPartitionId("one")
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenReturn(list);
StepVerifier.create(store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(partitionOwnershipTest -> {
assertEquals("fullyQualifiedNamespace", partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", partitionOwnershipTest.getEventHubName());
assertEquals("consumerGroup", partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listOwnership("", "", ""));
}
@Test
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
Assertions.assertNull(store.claimOwnership(partitionOwnershipList));
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(true);
Assertions.assertNull(store.updateCheckpoint(checkpoint));
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
And since you use the same constants in this test, you can specify them as static variables in this class. | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenReturn(list);
StepVerifier.create(store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(checkpointTest -> {
assertEquals("fullyQualifiedNamespace", checkpointTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", checkpointTest.getEventHubName());
assertEquals("consumerGroup", checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"); | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listCheckpoints("", "", ""));
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setEventHubName("eventHubName")
.setConsumerGroup("consumerGroup")
.setPartitionId("one")
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenReturn(list);
StepVerifier.create(store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(partitionOwnershipTest -> {
assertEquals("fullyQualifiedNamespace", partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", partitionOwnershipTest.getEventHubName());
assertEquals("consumerGroup", partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listOwnership("", "", ""));
}
@Test
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
Assertions.assertNull(store.claimOwnership(partitionOwnershipList));
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(true);
Assertions.assertNull(store.updateCheckpoint(checkpoint));
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
listCheckpoints should return a Flux.... I would expect the assertion to be using StepVerify.assertError or something similar. | public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
} | Assertions.assertThrows(NoSuchElementException.class, () -> store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup")); | public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenReturn(list);
StepVerifier.create(store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(checkpointTest -> {
assertEquals("fullyQualifiedNamespace", checkpointTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", checkpointTest.getEventHubName());
assertEquals("consumerGroup", checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listCheckpoints("", "", ""));
}
@Test
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setEventHubName("eventHubName")
.setConsumerGroup("consumerGroup")
.setPartitionId("one")
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenReturn(list);
StepVerifier.create(store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(partitionOwnershipTest -> {
assertEquals("fullyQualifiedNamespace", partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", partitionOwnershipTest.getEventHubName());
assertEquals("consumerGroup", partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listOwnership("", "", ""));
}
@Test
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
Assertions.assertNull(store.claimOwnership(partitionOwnershipList));
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(true);
Assertions.assertNull(store.updateCheckpoint(checkpoint));
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
I'd remove the act since it is on line 137... and also the same comment applies, since listOwnership returns a Flux, users would expect that the error be propagated through flux's error rather than throwing. | public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listOwnership("", "", ""));
} | public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenReturn(list);
StepVerifier.create(store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(checkpointTest -> {
assertEquals("fullyQualifiedNamespace", checkpointTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", checkpointTest.getEventHubName());
assertEquals("consumerGroup", checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listCheckpoints("", "", ""));
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setEventHubName("eventHubName")
.setConsumerGroup("consumerGroup")
.setPartitionId("one")
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenReturn(list);
StepVerifier.create(store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(partitionOwnershipTest -> {
assertEquals("fullyQualifiedNamespace", partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", partitionOwnershipTest.getEventHubName());
assertEquals("consumerGroup", partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
@Test
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
Assertions.assertNull(store.claimOwnership(partitionOwnershipList));
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(true);
Assertions.assertNull(store.updateCheckpoint(checkpoint));
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} | |
Not specifically. Went ahead and changed the code to use the DEFAULT_SERIALIZER instead. | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | ObjectSerializer serializer = DEFAULT_SERIALIZER; | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
ObjectSerializer serializer = DEFAULT_SERIALIZER;
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, "checkpoint", new String(serializer.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
Thanks Connie! I didn't know about the concept of singletonList but I have gone ahead and used it now! Thank you :) | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenReturn(list);
StepVerifier.create(store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(checkpointTest -> {
assertEquals("fullyQualifiedNamespace", checkpointTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", checkpointTest.getEventHubName());
assertEquals("consumerGroup", checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | list.add(new String(bytes)); | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listCheckpoints("", "", ""));
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("checkpoint"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setEventHubName("eventHubName")
.setConsumerGroup("consumerGroup")
.setPartitionId("one")
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
list.add(new String(bytes));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenReturn(list);
StepVerifier.create(store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(partitionOwnershipTest -> {
assertEquals("fullyQualifiedNamespace", partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", partitionOwnershipTest.getEventHubName());
assertEquals("consumerGroup", partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("
Assertions.assertThrows(IllegalArgumentException.class, () -> store.listOwnership("", "", ""));
}
@Test
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one");
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(value);
when(jedis.hmget(eq("fullyQualifiedNamespace/eventHubNamespace/consumerGroup/one"),
eq("partitionOwnership"))).thenThrow(new NoSuchElementException());
Assertions.assertThrows(NoSuchElementException.class, () -> store.listOwnership("fullyQualifiedNamespace", "eventHubName", "consumerGroup"));
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
Assertions.assertNull(store.claimOwnership(partitionOwnershipList));
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists("fullyQualifiedNamespace/eventHubName/consumerGroup")).thenReturn(true);
Assertions.assertNull(store.updateCheckpoint(checkpoint));
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
Thanks! That makes sense, I am now returning a Flux generated from an empty Stream. Would that address this? | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | return Flux.error(new IllegalArgumentException()); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
ObjectSerializer serializer = DEFAULT_SERIALIZER;
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, "checkpoint", new String(serializer.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
That's true. I've gone ahead and changed it such that all the Flux.fromStream are Flux.fromIterable instead. | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | return Flux.fromStream(list.stream()); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
ObjectSerializer serializer = DEFAULT_SERIALIZER;
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, "checkpoint", new String(serializer.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
I'm checking for a null now rather than for the empty list since that aligns better with what hmget would return. Thanks for pointing it out! | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | checkpointJson = checkpointJsonList.get(0); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
ObjectSerializer serializer = DEFAULT_SERIALIZER;
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, "checkpoint", new String(serializer.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
Do we need to explicitly returnResource? I thought that's why the try() {} block existed. If we do need to returnResource, I don't see it being returned in all other paths.. | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
jedisPool.returnResource(jedis);
if (checkpointJsonList == null) {
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
return Flux.fromIterable(listStoredCheckpoints);
}
} | jedisPool.returnResource(jedis); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
jedisPool.returnResource(jedis);
if (partitionOwnershipJsonList == null) {
return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters."));
} else {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
Does this fail checkstyles? iirc, it should be `} else {` rather than `} newline else {` | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
jedisPool.returnResource(jedis);
if (checkpointJsonList == null) {
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
return Flux.fromIterable(listStoredCheckpoints);
}
} | else { | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
jedisPool.returnResource(jedis);
if (partitionOwnershipJsonList == null) {
return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters."));
} else {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
Same comment as above about returning resources | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
jedisPool.returnResource(jedis);
if (partitionOwnershipJsonList == null) {
return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters."));
} else {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
return Flux.fromIterable(listStoredOwnerships);
}
} | jedisPool.returnResource(jedis); | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
jedisPool.returnResource(jedis);
if (checkpointJsonList == null) {
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
// Logger and the shared JSON serializer used to (de)serialize checkpoint/ownership records.
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Redis hash field names; package-private so tests can reference the same constants.
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
// Connection pool supplied by the caller; this store never creates its own connections.
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A {@link Flux} of the successfully claimed partition ownerships; currently always
 * empty - claiming appears not to be implemented yet (TODO confirm).
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
 * This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which checkpoint information is acquired
 * @param consumerGroup The consumer group name associated with the checkpoint
 * @return A {@link Flux} of the stored checkpoints; empty when none have been persisted.
 */
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
// Members of the prefix set are the per-partition keys.
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
// HMGET yields a list containing null when the field is absent - treated as "no checkpoint yet", not an error.
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
// Return the pooled connection once, after the whole set has been read.
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A {@link Mono} that completes once the checkpoint has been processed.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
// NOTE(review): the hash is only written when the prefix/key does not exist yet, so
// subsequent updates for an already-registered partition are skipped - confirm intended.
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
/**
 * Builds the Redis key prefix shared by every partition of one namespace/hub/consumer-group triple.
 */
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
/**
 * Builds the per-partition Redis hash key from the group prefix and the partition id.
 */
static String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
/**
 * A checkpoint is valid when it is non-null and carries at least one of offset / sequence number.
 */
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    // Primitive boolean instead of boxed Boolean: the result is only ever used in conditions.
    return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
If there's no partition ownership record for this value, can we just set a default one, or keep going and skip it in the returned results (i.e. no one has owned this partition before)? I don't know if this is a failed state that we _can't_ recover from. | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
jedisPool.returnResource(jedis);
if (partitionOwnershipJsonList == null) {
return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters."));
} else {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
return Flux.fromIterable(listStoredOwnerships);
}
} | return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters.")); | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
// Logger and the shared JSON serializer used to (de)serialize checkpoint/ownership records.
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Redis hash field names; package-private so tests can reference the same constants.
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
// Connection pool supplied by the caller; this store never creates its own connections.
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A {@link Flux} of the successfully claimed partition ownerships; currently always
 * empty because claiming is not implemented yet.
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Return an empty Flux instead of null: callers subscribe to the result
    // (e.g. StepVerifier.create(...)), and a null Publisher triggers a NullPointerException.
    return Flux.fromIterable(new ArrayList<>());
}
/**
 * This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which checkpoint information is acquired
 * @param consumerGroup The consumer group name associated with the checkpoint
 * @return A {@link Flux} of the stored checkpoints; empty when none have been persisted.
 */
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
        Set<String> members = jedis.smembers(prefix);
        if (members.isEmpty()) {
            jedisPool.returnResource(jedis);
            return Flux.fromIterable(listStoredCheckpoints);
        }
        for (String member : members) {
            List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
            // Guard both an empty list and a null first element: HMGET returns a list with a
            // null entry when the field is absent, and get(0) on an empty list would throw
            // IndexOutOfBoundsException. A missing checkpoint is not an error - skip it.
            if (!checkpointJsonList.isEmpty()) {
                String checkpointJson = checkpointJsonList.get(0);
                if (checkpointJson == null) {
                    LOGGER.verbose("No checkpoint persists yet.");
                    continue;
                }
                Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
                listStoredCheckpoints.add(checkpoint);
            } else {
                LOGGER.verbose("No checkpoint persists yet.");
            }
        }
        // Return the pooled connection once, after the whole member set has been read -
        // not on every loop iteration as before.
        jedisPool.returnResource(jedis);
        return Flux.fromIterable(listStoredCheckpoints);
    }
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A {@link Mono} that completes once the checkpoint has been processed.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        // NOTE(review): the hash is only written when the prefix/key does not exist yet, so
        // subsequent updates for an already-registered partition are skipped - confirm intended.
        if (!jedis.exists(prefix) || !jedis.exists(key)) {
            jedis.sadd(prefix, key);
            jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        }
        jedisPool.returnResource(jedis);
    }
    // Complete with an empty Mono rather than null; returning null breaks subscribers.
    return Mono.empty();
}
/**
 * Builds the Redis key prefix shared by every partition of one namespace/hub/consumer-group triple.
 */
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
/**
 * Builds the per-partition Redis hash key from the group prefix and the partition id.
 */
static String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
/**
 * A checkpoint is valid when it is non-null and carries at least one of offset / sequence number.
 */
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    // Primitive boolean instead of boxed Boolean: the result is only ever used in conditions.
    return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
// Logger and the shared JSON serializer used to (de)serialize checkpoint/ownership records.
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Redis hash field names; package-private so tests can reference the same constants.
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
// Connection pool supplied by the caller; this store never creates its own connections.
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A {@link Flux} of the successfully claimed partition ownerships; currently always
 * empty - claiming appears not to be implemented yet (TODO confirm).
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
 * This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which checkpoint information is acquired
 * @param consumerGroup The consumer group name associated with the checkpoint
 * @return A {@link Flux} of the stored checkpoints; empty when none have been persisted.
 */
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
// Members of the prefix set are the per-partition keys.
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
// HMGET yields a list containing null when the field is absent - treated as "no checkpoint yet", not an error.
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
// Return the pooled connection once, after the whole set has been read.
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A {@link Mono} that completes once the checkpoint has been processed.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
// NOTE(review): the hash is only written when the prefix/key does not exist yet, so
// subsequent updates for an already-registered partition are skipped - confirm intended.
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
/**
 * Builds the Redis key prefix shared by every partition of one namespace/hub/consumer-group triple.
 */
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
/**
 * Builds the per-partition Redis hash key from the group prefix and the partition id.
 */
static String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
/**
 * A checkpoint is valid when it is non-null and carries at least one of offset / sequence number.
 */
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    // Primitive boolean instead of boxed Boolean: the result is only ever used in conditions.
    return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
What if the list is empty? This will throw an IndexOutOfBoundsException — same with the usage below. | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
jedisPool.returnResource(jedis);
if (checkpointJsonList == null) {
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
return Flux.fromIterable(listStoredCheckpoints);
}
} | String checkpointJson = checkpointJsonList.get(0); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
// Logger and the shared JSON serializer used to (de)serialize checkpoint/ownership records.
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Redis hash field names; package-private so tests can reference the same constants.
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
// Connection pool supplied by the caller; this store never creates its own connections.
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A {@link Flux} of the successfully claimed partition ownerships; currently always
 * empty because claiming is not implemented yet.
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Return an empty Flux instead of null: callers subscribe to the result
    // (e.g. StepVerifier.create(...)), and a null Publisher triggers a NullPointerException.
    return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
 * Lists the partition ownership records stored for the given Event Hub and consumer group.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which checkpoint information is acquired
 * @param consumerGroup The consumer group name associated with the checkpoint
 * @return A {@link Flux} of the stored partition ownership records; empty when none exist.
 */
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        Set<String> members = jedis.smembers(prefix);
        ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
        if (members.isEmpty()) {
            jedisPool.returnResource(jedis);
            return Flux.fromIterable(listStoredOwnerships);
        }
        for (String member : members) {
            List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
            // Guard both an empty list and a null first element: HMGET returns a list with a
            // null entry when the field is absent, and get(0) on an empty list would throw
            // IndexOutOfBoundsException. "No one has owned this partition yet" is not an
            // error state - skip the entry instead of failing the whole listing.
            if (!partitionOwnershipJsonList.isEmpty()) {
                String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
                if (partitionOwnershipJson == null) {
                    LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
                    continue;
                }
                PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
                listStoredOwnerships.add(partitionOwnership);
            }
        }
        // Return the pooled connection once, after the whole member set has been read -
        // not on every loop iteration as before.
        jedisPool.returnResource(jedis);
        return Flux.fromIterable(listStoredOwnerships);
    }
}
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A {@link Mono} that completes once the checkpoint has been processed.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        // NOTE(review): the hash is only written when the prefix/key does not exist yet, so
        // subsequent updates for an already-registered partition are skipped - confirm intended.
        if (!jedis.exists(prefix) || !jedis.exists(key)) {
            jedis.sadd(prefix, key);
            jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        }
        jedisPool.returnResource(jedis);
    }
    // Complete with an empty Mono rather than null; returning null breaks subscribers.
    return Mono.empty();
}
/**
 * Builds the Redis key prefix shared by every partition of one namespace/hub/consumer-group triple.
 */
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
/**
 * Builds the per-partition Redis hash key from the group prefix and the partition id.
 */
static String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
/**
 * A checkpoint is valid when it is non-null and carries at least one of offset / sequence number.
 */
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    // Primitive boolean instead of boxed Boolean: the result is only ever used in conditions.
    return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
// Logger and the shared JSON serializer used to (de)serialize checkpoint/ownership records.
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Redis hash field names; package-private so tests can reference the same constants.
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
// Connection pool supplied by the caller; this store never creates its own connections.
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A {@link Flux} of the successfully claimed partition ownerships; currently always
 * empty - claiming appears not to be implemented yet (TODO confirm).
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
 * Lists the partition ownership records stored for the given Event Hub and consumer group.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which checkpoint information is acquired
 * @param consumerGroup The consumer group name associated with the checkpoint
 * @return A {@link Flux} of the stored partition ownership records; empty when none exist.
 */
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
// HMGET yields a list containing null when the field is absent - "no owner yet" is not an error.
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
// Return the pooled connection once, after the whole set has been read.
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A {@link Mono} that completes once the checkpoint has been processed.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
// NOTE(review): the hash is only written when the prefix/key does not exist yet, so
// subsequent updates for an already-registered partition are skipped - confirm intended.
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
/**
 * Builds the Redis key prefix shared by every partition of one namespace/hub/consumer-group triple.
 */
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
/**
 * Builds the per-partition Redis hash key from the group prefix and the partition id.
 */
static String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
/**
 * A checkpoint is valid when it is non-null and carries at least one of offset / sequence number.
 */
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    // Primitive boolean instead of boxed Boolean: the result is only ever used in conditions.
    return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
Where are the act and assert steps here? | public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
} | } | public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
// Mocked Jedis pool/connection plus the store under test; fresh mocks per test via setup().
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
// Keys are built with the store's own package-private helpers so the tests stay in sync.
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
// One stored checkpoint round-trips through serialization and is emitted by listCheckpoints.
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
    // Arrange: the prefix set has no members, i.e. nothing has been checkpointed.
    when(jedisPool.getResource()).thenReturn(jedis);
    when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
    // Act + Assert: the Flux must complete without emitting. The previous assertNext(...)
    // chain never ran because StepVerifier does nothing until a verify* call, and an empty
    // Flux would never invoke assertNext anyway.
    StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
        .verifyComplete();
}
// When HMGET itself yields null for a tracked key, listCheckpoints surfaces an error.
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(null);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
// One stored ownership record round-trips through serialization and is emitted by listOwnership.
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
    // Arrange: the prefix set has no members, i.e. no ownership records exist.
    when(jedisPool.getResource()).thenReturn(jedis);
    when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
    // Act + Assert: the Flux must complete without emitting. The previous assertNext(...)
    // chain was dead code - StepVerifier only executes once a verify* method is called.
    StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
        .verifyComplete();
}
// When HMGET itself yields null for a tracked key, listOwnership surfaces an error.
@Test
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(null);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
public void testClaimOwnership() {
    List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
    // Claiming is not implemented, so the store reports no successfully claimed partitions.
    // The previous assertNext(...) chain was dead code - StepVerifier only executes once a
    // verify* method is called, and an empty Flux never reaches assertNext anyway.
    StepVerifier.create(store.claimOwnership(partitionOwnershipList))
        .verifyComplete();
}
@Test
} | class JedisRedisCheckpointStoreTests {
// Mocked Jedis pool/connection plus the store under test; fresh mocks per test via setup().
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
// Keys are built with the store's own package-private helpers so the tests stay in sync.
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
// One stored checkpoint round-trips through serialization and is emitted by listCheckpoints.
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
// No members under the prefix: the Flux completes without emitting.
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
// A tracked key whose checkpoint field is absent (HMGET returns [null]) is skipped, not an error.
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
// One stored ownership record round-trips through serialization and is emitted by listOwnership.
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
// No members under the prefix: the Flux completes without emitting.
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
} |
onNext should always contain an item... also, you haven't set `.verify();` on this, so it is not running this test at all. | public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.assertNext(partitionOwnership -> {
Assertions.assertNull(partitionOwnership);
});
} | Assertions.assertNull(partitionOwnership); | public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
Assertions.assertNull(checkpointTest);
}
);
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(null);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
Assertions.assertNull(partitionOwnershipTest);
}
);
}
@Test
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(null);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
Missing the final steps to actually `verify()` test. | public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
Assertions.assertNull(partitionOwnershipTest);
}
);
} | ); | public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
Assertions.assertNull(checkpointTest);
}
);
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(null);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
@Test
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(null);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.assertNext(partitionOwnership -> {
Assertions.assertNull(partitionOwnership);
});
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
missing a verify. | public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
Assertions.assertNull(checkpointTest);
}
);
} | ); | public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(null);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
Assertions.assertNull(partitionOwnershipTest);
}
);
}
@Test
public void testListOwnershipKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(null);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.assertNext(partitionOwnership -> {
Assertions.assertNull(partitionOwnership);
});
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
I believe the idea is to return it before the try block ends, since unless you explicitly return the resource the pool assumes that the jedis instance is still in use. | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
jedisPool.returnResource(jedis);
if (checkpointJsonList == null) {
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
return Flux.fromIterable(listStoredCheckpoints);
}
} | jedisPool.returnResource(jedis); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
jedisPool.returnResource(jedis);
if (partitionOwnershipJsonList == null) {
return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters."));
} else {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
That makes sense! I think skipping this result instead would be a better idea. | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
jedisPool.returnResource(jedis);
if (partitionOwnershipJsonList == null) {
return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters."));
} else {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
return Flux.fromIterable(listStoredOwnerships);
}
} | return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters.")); | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
jedisPool.returnResource(jedis);
if (checkpointJsonList == null) {
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html The syntactic sugar for `try-with-resource` is doing a `close` after the try block ends. . . is this the same as `returnResource`? | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
jedisPool.returnResource(jedis);
if (checkpointJsonList == null) {
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
return Flux.fromIterable(listStoredCheckpoints);
}
} | jedisPool.returnResource(jedis); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
jedisPool.returnResource(jedis);
if (partitionOwnershipJsonList == null) {
return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters."));
} else {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
Why are we expecting 0L? You can simply use ``` StepVerifier.create(store.updateCheckpoint(checkpoint)) .verifyComplete(); ``` | public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.expectNextCount(0L)
.verifyComplete();
} | .expectNextCount(0L) | public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectNextCount(0L)
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(null);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectNextCount(0L)
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.expectNextCount(0L)
.verifyComplete();
}
@Test
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
} |
Why expect 0L? That means the mono is just expected to complete without emitting any items. you can remove this line. | public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectNextCount(0L)
.verifyComplete();
} | .expectNextCount(0L) | public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectNextCount(0L)
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(null);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.expectNextCount(0L)
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.expectNextCount(0L)
.verifyComplete();
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
That seems a lot easier! I was thinking that since I'm expecting no elements in the Flux I could do it that way too. | public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.expectNextCount(0L)
.verifyComplete();
} | .expectNextCount(0L) | public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectNextCount(0L)
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(null);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectNextCount(0L)
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.expectNextCount(0L)
.verifyComplete();
}
@Test
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
} |
Thanks for explaining this. I went ahead and removed it :) | public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectNextCount(0L)
.verifyComplete();
} | .expectNextCount(0L) | public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectNextCount(0L)
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(null);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectError(IllegalStateException.class)
.verify();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.expectNextCount(0L)
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.expectNextCount(0L)
.verifyComplete();
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
I believe it's more succinct to use the snippet below instead of adding returnResource to every exit point. ```java try (var jedis = stuff) { ... } finally { jedisPool.returnResource(jedis); } ``` | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
jedisPool.returnResource(jedis);
if (checkpointJsonList == null) {
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
return Flux.fromIterable(listStoredCheckpoints);
}
} | jedisPool.returnResource(jedis); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
jedisPool.returnResource(jedis);
if (partitionOwnershipJsonList == null) {
return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters."));
} else {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
I don't think we should error. If there's no checkpoint for a partition, it's possible they haven't persisted a checkpoint yet. My suggestion was to lean towards logging and then just "continuing" to fetch additional checkpoints. | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (checkpointJsonList == null) {
jedisPool.returnResource(jedis);
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
if (checkpointJsonList.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
} else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters.")); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (partitionOwnershipJsonList == null) {
continue;
}
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
Same comment about try/finally | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
jedisPool.returnResource(jedis);
if (partitionOwnershipJsonList == null) {
return Flux.error(new IllegalStateException("No ownership record persist in Redis for the given parameters."));
} else {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
return Flux.fromIterable(listStoredOwnerships);
}
} | jedisPool.returnResource(jedis); | public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
jedisPool.returnResource(jedis);
if (checkpointJsonList == null) {
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
/**
 * Creates a checkpoint store backed by the given Jedis connection pool.
 *
 * @param jedisPool pool from which a Jedis connection is borrowed for each store operation
 */
JedisRedisCheckpointStore(JedisPool jedisPool) {
    this.jedisPool = jedisPool;
}
/**
 * Returns the partitions that were claimed successfully. Ownership claiming is not
 * implemented yet, so this emits nothing and completes.
 *
 * @param requestedPartitionOwnerships List of partition ownerships requested by the current instance
 * @return an empty {@link Flux}
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Equivalent to Flux.fromIterable(new ArrayList<>()): completes without emitting.
    return Flux.empty();
}
/**
 * This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which checkpoint information is acquired
 * @param consumerGroup The consumer group name associated with the checkpoint
 * @return a {@link Flux} of the stored checkpoints; empty when none have been persisted yet
 */
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    // Redis set keyed by "<namespace>/<eventHub>/<consumerGroup>"; one member per partition key.
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
        Set<String> members = jedis.smembers(prefix);
        if (members.isEmpty()) {
            // No partitions registered under this prefix: empty Flux rather than an error.
            // NOTE(review): returnResource looks redundant — try-with-resources closes the
            // Jedis, which already returns it to the pool; confirm before removing.
            jedisPool.returnResource(jedis);
            return Flux.fromIterable(listStoredCheckpoints);
        }
        for (String member : members) {
            // hmget of a single field yields a one-element list; the element is null when
            // the hash has no "checkpoint" field yet.
            List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
            if (!checkpointJsonList.isEmpty()) {
                String checkpointJson = checkpointJsonList.get(0);
                if (checkpointJson == null) {
                    LOGGER.verbose("No checkpoint persists yet.");
                    continue;
                }
                Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
                listStoredCheckpoints.add(checkpoint);
            } else {
                LOGGER.verbose("No checkpoint persists yet.");
            }
        }
        jedisPool.returnResource(jedis);
        return Flux.fromIterable(listStoredCheckpoints);
    }
}
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
 * Persists the given checkpoint for its partition in Redis. The partition key is always
 * (re-)added to the prefix set and the serialized checkpoint is always written, so later
 * checkpoints overwrite earlier ones.
 *
 * @param checkpoint Checkpoint information for this partition; must be non-null and carry
 * an offset or a sequence number
 * @return an empty {@link Mono}
 * @throws IllegalStateException if the checkpoint is null or has neither an offset nor a
 * sequence number
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        // BUG FIX: the previous "if (!jedis.exists(prefix) || !jedis.exists(key))" guard
        // skipped the write when both keys already existed, so every checkpoint after the
        // first was silently dropped. Write unconditionally: SADD is idempotent and HSET
        // overwrites the field, which is exactly what an update should do.
        jedis.sadd(prefix, key);
        jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        jedisPool.returnResource(jedis);
    }
    return Mono.empty();
}
/**
 * Builds the Redis key prefix {@code "<namespace>/<eventHub>/<consumerGroup>"} under which
 * all partition keys of one consumer group are grouped.
 */
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
/** Builds the per-partition Redis key {@code "<prefix>/<partitionId>"}. */
static String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
/**
 * A checkpoint is valid when it is non-null and carries at least one of offset or
 * sequence number.
 *
 * @param checkpoint checkpoint to validate; may be null
 * @return true when the checkpoint can be persisted
 */
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    // Primitive boolean instead of boxed Boolean: callers only use it in conditions,
    // and the boxed form risks needless allocation (and NPE if ever auto-unboxed from null).
    if (checkpoint == null) {
        return false;
    }
    return checkpoint.getOffset() != null || checkpoint.getSequenceNumber() != null;
}
} |
I went ahead and made that change. Thank you :D | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (checkpointJsonList == null) {
jedisPool.returnResource(jedis);
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
}
if (checkpointJsonList.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters."));
} else {
String checkpointJson = checkpointJsonList.get(0);
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | return Flux.error(new IllegalStateException("No checkpoints persist in Redis for the given parameters.")); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
/** Creates a checkpoint store backed by the given Jedis connection pool. */
JedisRedisCheckpointStore(JedisPool jedisPool) {
    this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return an empty {@link Flux}; ownership claiming is not implemented yet
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Placeholder implementation: claims nothing, emits nothing, completes immediately.
    return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
 * Lists the partition-ownership records stored in Redis for the given namespace /
 * Event Hub / consumer group.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which checkpoint information is acquired
 * @param consumerGroup The consumer group name associated with the checkpoint
 * @return a {@link Flux} of stored ownership records; empty when none exist
 */
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        Set<String> members = jedis.smembers(prefix);
        ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
        if (members.isEmpty()) {
            // NOTE(review): returnResource is redundant inside try-with-resources
            // (closing the Jedis returns it to the pool) — confirm before removing.
            jedisPool.returnResource(jedis);
            return Flux.fromIterable(listStoredOwnerships);
        }
        for (String member : members) {
            List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
            // NOTE(review): hmget is not documented to return null (it returns a list with
            // null elements for missing fields); this guard may be dead code — verify.
            if (partitionOwnershipJsonList == null) {
                continue;
            }
            if (!partitionOwnershipJsonList.isEmpty()) {
                String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
                PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
                listStoredOwnerships.add(partitionOwnership);
            }
        }
        jedisPool.returnResource(jedis);
        return Flux.fromIterable(listStoredOwnerships);
    }
}
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return an empty {@link Mono}
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        // NOTE(review): this guard means the checkpoint is written only when prefix or key
        // is missing — once both exist, subsequent updates are silently dropped. That looks
        // like a bug for an "update" method; the write should probably be unconditional.
        if (!jedis.exists(prefix) || !jedis.exists(key)) {
            jedis.sadd(prefix, key);
            jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        }
        jedisPool.returnResource(jedis);
    }
    return Mono.empty();
}
// Builds the Redis key prefix "<namespace>/<eventHub>/<consumerGroup>".
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
// Builds the per-partition Redis key "<prefix>/<partitionId>".
static String keyBuilder(String prefix, String partitionId) {
    return prefix + "/" + partitionId;
}
/** A checkpoint is usable when it is non-null and at least one of offset / sequence number is set. */
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
    if (checkpoint == null) {
        return false;
    }
    return checkpoint.getOffset() != null || checkpoint.getSequenceNumber() != null;
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
/** Creates a checkpoint store backed by the given Jedis connection pool. */
JedisRedisCheckpointStore(JedisPool jedisPool) {
    this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return an empty {@link Flux}; ownership claiming is not implemented yet
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Placeholder: nothing is claimed.
    return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
 * Lists the partition-ownership records stored in Redis for the given namespace /
 * Event Hub / consumer group.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which checkpoint information is acquired
 * @param consumerGroup The consumer group name associated with the checkpoint
 * @return a {@link Flux} of stored ownership records; empty when none exist
 */
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        Set<String> members = jedis.smembers(prefix);
        ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
        if (members.isEmpty()) {
            jedisPool.returnResource(jedis);
            return Flux.fromIterable(listStoredOwnerships);
        }
        for (String member : members) {
            // One-element list for the single requested field; the element is null when
            // the hash has no "partitionOwnership" field yet.
            List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
            if (!partitionOwnershipJsonList.isEmpty()) {
                String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
                if (partitionOwnershipJson == null) {
                    LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
                    continue;
                }
                PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
                listStoredOwnerships.add(partitionOwnership);
            }
        }
        jedisPool.returnResource(jedis);
        return Flux.fromIterable(listStoredOwnerships);
    }
}
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return an empty {@link Mono}
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        // NOTE(review): the write only happens when prefix or key does not yet exist,
        // so repeat checkpoints for an existing partition are never persisted — likely a bug.
        if (!jedis.exists(prefix) || !jedis.exists(key)) {
            jedis.sadd(prefix, key);
            jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        }
        jedisPool.returnResource(jedis);
    }
    return Mono.empty();
}
// Builds the Redis key prefix "<namespace>/<eventHub>/<consumerGroup>".
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
// Builds the per-partition Redis key "<prefix>/<partitionId>".
static String keyBuilder(String prefix, String partitionId) {
    return prefix + "/" + partitionId;
}
// Valid = non-null checkpoint carrying at least one of offset / sequence number.
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
    return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
```suggestion if (CriteriaType.isFunction(criteria.getType()) && !CriteriaType.isUnary(criteria.getType())) { ``` | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType()) && ! CriteriaType.isUnary(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter, false);
} else if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | if (CriteriaType.isFunction(criteria.getType()) && ! CriteriaType.isUnary(criteria.getType())) { | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue instanceof String) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUALS.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | class AbstractQueryGenerator {
/**
 * Protected no-op constructor: this class is an abstract base and is only instantiated
 * through its subclasses.
 */
protected AbstractQueryGenerator() {
}
/**
 * Derives a SQL parameter name from a column subject: every non-alphanumeric character is
 * replaced with an underscore and a counter suffix keeps repeated subjects unique.
 */
private String generateQueryParameter(@NonNull String subject, int counter) {
    final String sanitized = subject.replaceAll("[^a-zA-Z\\d]", "_");
    return sanitized + counter;
}
/**
 * Renders a unary criteria (one with no subject values), either as a SQL function call
 * {@code KEYWORD(r.subject)} or as a postfix expression {@code r.subject KEYWORD}.
 */
private String generateUnaryQuery(@NonNull Criteria criteria) {
    Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
    Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
    final String subject = criteria.getSubject();
    final String keyword = criteria.getType().getSqlKeyword();
    if (CriteriaType.isFunction(criteria.getType())) {
        return keyword + "(r." + subject + ")";
    }
    return "r." + subject + " " + keyword;
}
/**
 * Get condition string for a plain binary comparison (no SQL function involved);
 * both sides are wrapped in UPPER() when the comparison ignores case.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql parameter name referencing the filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
    final String subject, final String parameter) {
    if (Part.IgnoreCaseType.NEVER == ignoreCase) {
        return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
    } else {
        return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
    }
}
/**
 * Get condition string rendered as a SQL function call, e.g. {@code CONTAINS(r.name, @name0)}.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, function name
 * @param subject sql column name
 * @param parameter sql parameter name referencing the filter value
 * @param takesCaseSensitiveParam if the function type can take the third boolean param
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
    final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
    if (Part.IgnoreCaseType.NEVER == ignoreCase) {
        return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
    } else {
        if (takesCaseSensitiveParam) {
            // Third argument "true" asks the function itself to ignore case.
            return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
        } else {
            // NOTE(review): passing "true" here while ALSO uppercasing both sides looks
            // redundant, and functions without the boolean param may reject a third
            // argument entirely — confirm against the Cosmos SQL function signatures.
            return String.format("%s(UPPER(r.%s), UPPER(@%s), true)", sqlKeyword, subject, parameter);
        }
    }
}
/**
 * Renders a BETWEEN criteria as "(r.subject BETWEEN @subjectstartN AND @subjectendN)" and
 * registers both bound values as named parameters.
 */
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
    final String subject = criteria.getSubject();
    final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
    final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
    // Distinct parameter names for the lower and upper bound of the range.
    final String subject1 = subject + "start";
    final String subject2 = subject + "end";
    final String parameter1 = generateQueryParameter(subject1, counter);
    final String parameter2 = generateQueryParameter(subject2, counter);
    final String keyword = criteria.getType().getSqlKeyword();
    parameters.add(Pair.of(parameter1, value1));
    parameters.add(Pair.of(parameter2, value2));
    return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
/**
 * Joins two already-rendered sub-conditions with a closing binary operator (AND / OR),
 * wrapping the result in parentheses to preserve precedence.
 */
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
    Assert.isTrue(CriteriaType.isClosed(type)
        && CriteriaType.isBinary(type),
        "Criteria type should be binary and closure operation");
    return "(" + left + " " + type.getSqlKeyword() + " " + right + ")";
}
/**
 * Renders an IN / NOT IN criteria, turning each element of the (single) Collection subject
 * value into its own named parameter: "r.subject IN (@p0,@p1,...)".
 */
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
    Assert.isTrue(criteria.getSubjectValues().size() == 1,
        "Criteria should have only one subject value");
    if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
        throw new IllegalQueryException("IN keyword requires Collection type in parameters");
    }
    final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
    final List<String> paras = new ArrayList<>();
    for (Object o : values) {
        // NOTE(review): the accepted set (String/Integer/Long/Boolean) does not match the
        // error message below — Boolean is accepted but unmentioned, while other Number
        // subtypes (e.g. Double) are rejected despite the message saying "Number".
        if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
            // Parameter names are positional over the whole query ("p0", "p1", ...).
            String key = "p" + parameters.size();
            paras.add("@" + key);
            parameters.add(Pair.of(key, o));
        } else {
            throw new IllegalQueryException("IN keyword Range only support Number and String type.");
        }
    }
    return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
        String.join(",", paras));
}
/**
 * Recursively renders one criteria node into its SQL fragment, dispatching on the
 * criteria type and appending any bound values to {@code parameters}. The shared
 * {@code counter} keeps generated parameter names unique across the whole tree.
 */
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
    final CriteriaType type = criteria.getType();
    switch (type) {
        case ALL:
            // No filtering: contributes nothing to the WHERE clause.
            return "";
        case IN:
        case NOT_IN:
            return generateInQuery(criteria, parameters);
        case BETWEEN:
            return generateBetween(criteria, parameters, counter.getAndIncrement());
        case IS_NULL:
        case IS_NOT_NULL:
        case FALSE:
        case TRUE:
            return generateUnaryQuery(criteria);
        case IS_EQUAL:
        case NOT:
        case BEFORE:
        case AFTER:
        case LESS_THAN:
        case LESS_THAN_EQUAL:
        case GREATER_THAN:
        case GREATER_THAN_EQUAL:
        case CONTAINING:
        case ENDS_WITH:
        case STARTS_WITH:
        case ARRAY_CONTAINS:
            return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
        case AND:
        case OR:
            // Closure nodes: render both children, then join them with the operator.
            Assert.isTrue(criteria.getSubCriteria().size() == 2,
                "criteria should have two SubCriteria");
            final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
            final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
            return generateClosedQuery(left, right, type);
        default:
            throw new UnsupportedOperationException("unsupported Criteria type: "
                + type);
    }
}
/**
 * Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
 * parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
 *
 * @param query the representation for query method.
 * @return A pair tuple compose of Sql query.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
    final List<Pair<String, Object>> parameters = new ArrayList<>();
    String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
    // Only prepend WHERE when the criteria actually produced a condition (ALL yields "").
    if (StringUtils.hasText(queryString)) {
        queryString = String.join(" ", "WHERE", queryString);
    }
    return Pair.of(queryString, parameters);
}
/** Renders one ORDER BY term, e.g. {@code "r.name DESC"}. Ignore-case ordering is unsupported. */
private static String getParameter(@NonNull Sort.Order order) {
    Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
    final String direction = order.isDescending() ? "DESC" : "ASC";
    return "r." + order.getProperty() + " " + direction;
}
/** Builds the ORDER BY clause for the given sort; returns "" when the sort is unsorted. */
static String generateQuerySort(@NonNull Sort sort) {
    if (sort.isUnsorted()) {
        return "";
    }
    final String orderings = sort.stream()
        .map(AbstractQueryGenerator::getParameter)
        .collect(Collectors.joining(","));
    return "ORDER BY " + orderings;
}
// Assembles the trailing clauses of the query (currently only ORDER BY), dropping blanks.
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
    final List<String> queryTails = new ArrayList<>();
    queryTails.add(generateQuerySort(query.getSort()));
    return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
 * Generates a Cosmos query: head + WHERE body + ORDER BY tail, with all bound values
 * exposed as named {@link SqlParameter}s, and an OFFSET/LIMIT suffix when a limit is set.
 *
 * @param query the representation for query method.
 * @param queryHead the query head.
 * @return the SQL query spec.
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
    @NonNull String queryHead) {
    final AtomicInteger counter = new AtomicInteger();
    final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
    String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
    final List<SqlParameter> sqlParameters = new ArrayList<>();
    for (Pair<String, Object> parameter : queryBody.getSecond()) {
        sqlParameters.add(new SqlParameter("@" + parameter.getFirst(), toCosmosDbValue(parameter.getSecond())));
    }
    if (query.getLimit() > 0) {
        queryString = queryString + " OFFSET 0 LIMIT " + query.getLimit();
    }
    return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
/**
 * Protected no-op constructor: this abstract base is only instantiated via subclasses.
 */
protected AbstractQueryGenerator() {
}
// Sanitizes the subject into a parameter name (non-alphanumerics -> "_") plus a unique counter suffix.
private String generateQueryParameter(@NonNull String subject, int counter) {
    return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
// Renders a unary criteria either as KEYWORD(r.subject) or as "r.subject KEYWORD".
private String generateUnaryQuery(@NonNull Criteria criteria) {
    Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
    Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
    final String subject = criteria.getSubject();
    if (CriteriaType.isFunction(criteria.getType())) {
        return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
    } else {
        return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
    }
}
/**
 * Get condition string for a plain binary comparison (no SQL function involved);
 * both sides are wrapped in UPPER() when the comparison ignores case.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql parameter name referencing the filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
    final String subject, final String parameter) {
    if (Part.IgnoreCaseType.NEVER == ignoreCase) {
        return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
    } else {
        return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
    }
}
/**
 * Get condition string rendered as a SQL function call, e.g. {@code CONTAINS(r.name, @name0)}.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, function name
 * @param subject sql column name
 * @param parameter sql parameter name referencing the filter value
 * @param takesCaseSensitiveParam if the function type can take the third boolean param
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
    final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
    if (Part.IgnoreCaseType.NEVER == ignoreCase) {
        return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
    } else {
        if (takesCaseSensitiveParam) {
            // Function accepts an ignore-case boolean: delegate case handling to it.
            return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
        } else {
            // Function has no ignore-case flag: normalize both sides with UPPER instead.
            return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
        }
    }
}
// Renders BETWEEN as "(r.subject BETWEEN @subjectstartN AND @subjectendN)", binding both bounds.
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
    final String subject = criteria.getSubject();
    final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
    final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
    final String subject1 = subject + "start";
    final String subject2 = subject + "end";
    final String parameter1 = generateQueryParameter(subject1, counter);
    final String parameter2 = generateQueryParameter(subject2, counter);
    final String keyword = criteria.getType().getSqlKeyword();
    parameters.add(Pair.of(parameter1, value1));
    parameters.add(Pair.of(parameter2, value2));
    return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
// Joins two rendered sub-conditions with AND/OR, parenthesized to preserve precedence.
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
    Assert.isTrue(CriteriaType.isClosed(type)
        && CriteriaType.isBinary(type),
        "Criteria type should be binary and closure operation")
;
    return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
// Renders IN / NOT IN, binding each Collection element as its own positional parameter (@p0, @p1, ...).
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
    Assert.isTrue(criteria.getSubjectValues().size() == 1,
        "Criteria should have only one subject value");
    if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
        throw new IllegalQueryException("IN keyword requires Collection type in parameters");
    }
    final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
    final List<String> paras = new ArrayList<>();
    for (Object o : values) {
        // NOTE(review): accepted types (String/Integer/Long/Boolean) do not match the
        // error message, which mentions only Number and String — verify intent.
        if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
            String key = "p" + parameters.size();
            paras.add("@" + key);
            parameters.add(Pair.of(key, o));
        } else {
            throw new IllegalQueryException("IN keyword Range only support Number and String type.");
        }
    }
    return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
        String.join(",", paras));
}
/**
 * Recursively renders a criteria tree into a SQL condition string. Values to bind are
 * appended to {@code parameters}; {@code counter} supplies a unique suffix for the
 * parameter names generated by each leaf criteria.
 */
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
// No filter: the caller omits the WHERE clause for an empty body.
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
// Left child is rendered before the right one so parameter numbering is deterministic.
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
 * Generate the query body (WHERE clause) for the QuerySpecGenerator interface. The
 * result pairs the SQL text with its parameters; each parameter is a (name, value) pair.
 *
 * @param query the representation for query method.
 * @param counter source of unique suffixes for generated parameter names.
 * @return a pair of the WHERE clause (empty string when unfiltered) and its parameters.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
    final List<Pair<String, Object>> boundValues = new ArrayList<>();
    final String condition = this.generateQueryBody(query.getCriteria(), boundValues, counter);
    // An empty condition (CriteriaType.ALL) produces no WHERE clause at all.
    final String whereClause = StringUtils.hasText(condition) ? "WHERE " + condition : condition;
    return Pair.of(whereClause, boundValues);
}
/** Renders one ORDER BY term, e.g. "r.lastName DESC"; ignore-case ordering is rejected. */
private static String getParameter(@NonNull Sort.Order order) {
    Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
    if (order.isDescending()) {
        return "r." + order.getProperty() + " DESC";
    }
    return "r." + order.getProperty() + " ASC";
}
/** Builds the ORDER BY clause ("ORDER BY r.a ASC,r.b DESC"), or "" when unsorted. */
static String generateQuerySort(@NonNull Sort sort) {
    if (sort.isUnsorted()) {
        return "";
    }
    final String orderTerms = sort.stream()
        .map(AbstractQueryGenerator::getParameter)
        .collect(Collectors.joining(","));
    return "ORDER BY " + orderTerms;
}
/** Assembles trailing clauses — currently only the ORDER BY sort; blank output is dropped. */
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
    final String sortClause = generateQuerySort(query.getSort());
    // generateQuerySort yields "" for an unsorted query, which is exactly what we return.
    return StringUtils.hasText(sortClause) ? sortClause : "";
}
/**
 * Generates a Cosmos query: joins the query head (SELECT ...), the WHERE body and the
 * ORDER BY tail, appends "OFFSET 0 LIMIT n" when the query carries a positive limit,
 * and converts the collected (name, value) pairs into named SqlParameters.
 *
 * @param query the representation for query method.
 * @param queryHead the query head.
 * @return the SQL query spec.
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
                                           @NonNull String queryHead) {
    final AtomicInteger counter = new AtomicInteger();
    final Pair<String, List<Pair<String, Object>>> body = generateQueryBody(query, counter);
    String sql = String.join(" ", queryHead, body.getFirst(), generateQueryTail(query));
    if (query.getLimit() > 0) {
        sql = sql + " OFFSET 0 LIMIT " + query.getLimit();
    }
    final List<SqlParameter> sqlParameters = new ArrayList<>();
    for (final Pair<String, Object> p : body.getSecond()) {
        // Parameter names were generated without the "@" prefix; it is added here.
        sqlParameters.add(new SqlParameter("@" + p.getFirst(), toCosmosDbValue(p.getSecond())));
    }
    return new SqlQuerySpec(sql, sqlParameters);
}
} |
I don't quite get this - StartsWith and EndsWith allow setting the boolean parameters whether to do case sensitive search or not - So I assume you would want to pass true into takesCaseSensitiveParam - but isUnary(STARTS_WITH) would return false - meaning you would end-up setting to takesCaseSensitiveParam and end-up calling UPPER again (being a horribly inefficient query) | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType()) && ! CriteriaType.isUnary(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter, false);
} else if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | if (CriteriaType.isFunction(criteria.getType()) && ! CriteriaType.isUnary(criteria.getType())) { | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue instanceof String) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUALS.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | class AbstractQueryGenerator {
/**
 * Protected no-op constructor: the generator is stateless and is intended to be
 * used only through concrete subclasses.
 */
protected AbstractQueryGenerator() {
}
/**
 * Derives a SQL parameter name from a field path: every character that is not a letter
 * or digit becomes '_' (e.g. "address.city" -> "address_city"), and the counter suffix
 * keeps the name unique within one generated query.
 */
private String generateQueryParameter(@NonNull String subject, int counter) {
    final String sanitized = subject.replaceAll("[^a-zA-Z\\d]", "_");
    return sanitized + counter;
}
/**
 * Renders a unary predicate (no bound value): function style "KEYWORD(r.field)" for
 * function criteria, otherwise postfix style "r.field KEYWORD".
 */
private String generateUnaryQuery(@NonNull Criteria criteria) {
    Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
    Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
    final String keyword = criteria.getType().getSqlKeyword();
    final String field = criteria.getSubject();
    return CriteriaType.isFunction(criteria.getType())
        ? keyword + "(r." + field + ")"
        : "r." + field + " " + keyword;
}
/**
 * Get condition string for a plain comparison operator (no SQL function), e.g.
 * "r.name = @name0". With ignore-case, both sides are wrapped in UPPER().
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
 * Get condition string for a function-style operator, e.g. "CONTAINS(r.name, @name0)".
 * With ignore-case requested, functions that accept Cosmos DB's third boolean argument
 * get ", true" (case-insensitive match); all others are emulated with UPPER() on both sides.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @param takesCaseSensitiveParam if the function type can take the third boolean param
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
                                    final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
    if (Part.IgnoreCaseType.NEVER == ignoreCase) {
        return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
    }
    if (takesCaseSensitiveParam) {
        // Third argument "true" = ignore case, per Cosmos DB string-function signatures
        // (CONTAINS / STARTSWITH / ENDSWITH / STRINGEQUALS).
        return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
    }
    // BUG FIX: this branch handles functions that do NOT take the case-sensitivity flag
    // (e.g. ARRAY_CONTAINS, whose third boolean means "partial match", not case).
    // Passing ", true" here silently changed the function's semantics; emulate
    // case-insensitivity with UPPER() on both sides instead.
    return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
/**
 * Builds "(r.field BETWEEN @fieldstartN AND @fieldendN)" from the criteria's two
 * subject values and registers both bound values in {@code parameters}.
 */
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
/**
 * Joins two rendered sub-conditions with a binary closure keyword (AND / OR),
 * parenthesizing the result to preserve precedence.
 */
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
/**
 * Builds an IN / NOT IN predicate, e.g. "r.status IN (@p0,@p1)". The single subject
 * value must be a Collection; each element becomes its own parameter named
 * "p" + current size of {@code parameters}.
 */
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
// Allow-list of element types. NOTE(review): the message below is imprecise -
// Boolean is accepted while e.g. Double is not; kept as-is for compatibility.
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
/**
 * Recursively renders a criteria tree into a SQL condition string. Values to bind are
 * appended to {@code parameters}; {@code counter} supplies a unique suffix for the
 * parameter names generated by each leaf criteria.
 */
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
// No filter: the caller omits the WHERE clause for an empty body.
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
// Left child is rendered before the right one so parameter numbering is deterministic.
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
 * Generate the query body (WHERE clause) for interface QuerySpecGenerator. The result
 * pairs the SQL text with its parameters; each parameter is a (name, value) pair.
 *
 * @param query the representation for query method.
 * @param counter source of unique suffixes for generated parameter names.
 * @return a pair of the WHERE clause (empty string when unfiltered) and its parameters.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
// Renders one ORDER BY term, e.g. "r.lastName DESC". Ignore-case ordering is rejected.
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
// Builds the ORDER BY clause ("ORDER BY r.a ASC,r.b DESC"), or "" when unsorted.
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
// Assembles trailing clauses (currently only the ORDER BY sort); blank parts are dropped.
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
 * Generates a Cosmos query: joins the query head (SELECT ...), the WHERE body and the
 * ORDER BY tail, appends "OFFSET 0 LIMIT n" for a positive limit, and converts the
 * collected (name, value) pairs into named SqlParameters (the "@" prefix is added here).
 *
 * @param query the representation for query method.
 * @param queryHead the query head.
 * @return the SQL query spec.
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
/**
 * Protected no-op constructor: the generator is stateless and is intended to be
 * used only through concrete subclasses.
 */
protected AbstractQueryGenerator() {
}
// Derives a SQL parameter name from a field path: non-alphanumerics become '_'
// (e.g. "address.city" -> "address_city"); the counter suffix keeps names unique.
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
/**
 * Renders a unary predicate (no bound value): function style "KEYWORD(r.field)" for
 * function criteria, otherwise postfix style "r.field KEYWORD".
 */
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
 * Get condition string for a plain comparison operator (no SQL function), e.g.
 * "r.name = @name0". With ignore-case, both sides are wrapped in UPPER().
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
 * Get condition string for a function-style operator, e.g. "CONTAINS(r.name, @name0)".
 * With ignore-case requested, functions that accept Cosmos DB's third boolean argument
 * get ", true" (case-insensitive match); all others are emulated with UPPER() on both sides.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @param takesCaseSensitiveParam if the function type can take the third boolean param
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
// Third argument "true" = ignore case, per Cosmos DB string-function signatures.
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
}
/**
 * Builds "(r.field BETWEEN @fieldstartN AND @fieldendN)" from the criteria's two
 * subject values and registers both bound values in {@code parameters}.
 */
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
/**
 * Joins two rendered sub-conditions with a binary closure keyword (AND / OR),
 * parenthesizing the result to preserve precedence.
 */
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
/**
 * Builds an IN / NOT IN predicate, e.g. "r.status IN (@p0,@p1)". The single subject
 * value must be a Collection; each element becomes its own parameter named
 * "p" + current size of {@code parameters}.
 */
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
// Allow-list of element types. NOTE(review): the message below is imprecise -
// Boolean is accepted while e.g. Double is not; kept as-is for compatibility.
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
/**
 * Recursively renders a criteria tree into a SQL condition string. Values to bind are
 * appended to {@code parameters}; {@code counter} supplies a unique suffix for the
 * parameter names generated by each leaf criteria.
 */
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
// No filter: the caller omits the WHERE clause for an empty body.
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
// Left child is rendered before the right one so parameter numbering is deterministic.
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
 * Generate the query body (WHERE clause) for interface QuerySpecGenerator. The result
 * pairs the SQL text with its parameters; each parameter is a (name, value) pair.
 *
 * @param query the representation for query method.
 * @param counter source of unique suffixes for generated parameter names.
 * @return a pair of the WHERE clause (empty string when unfiltered) and its parameters.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
// Renders one ORDER BY term, e.g. "r.lastName DESC". Ignore-case ordering is rejected.
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
// Builds the ORDER BY clause ("ORDER BY r.a ASC,r.b DESC"), or "" when unsorted.
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
// Assembles trailing clauses (currently only the ORDER BY sort); blank parts are dropped.
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
 * Generates a Cosmos query: joins the query head (SELECT ...), the WHERE body and the
 * ORDER BY tail, appends "OFFSET 0 LIMIT n" for a positive limit, and converts the
 * collected (name, value) pairs into named SqlParameters (the "@" prefix is added here).
 *
 * @param query the representation for query method.
 * @param queryHead the query head.
 * @return the SQL query spec.
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} |
what would happen if the value is null? for example, if I have the following document: ``` { "id": "id1", "mypk": "mypk1", "extraProperty": null, } ``` select * from r where r.extraProperty = null will return the item select * from r where STRINGEQUALS(r.extraProperty, null) will not return anything instead we do the conversion(from IS_EQUAL -> STRINGEQUALS) internally, should customer made the choice by themselves? | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue.getClass().getTypeName().equals("java.lang.String")) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUAL.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUAL.getSqlKeyword(), | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue instanceof String) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUALS.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | class AbstractQueryGenerator {
/**
 * Protected no-op constructor: the generator is stateless and is intended to be
 * used only through concrete subclasses.
 */
protected AbstractQueryGenerator() {
}
// Derives a SQL parameter name from a field path: non-alphanumerics become '_'
// (e.g. "address.city" -> "address_city"); the counter suffix keeps names unique.
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
/**
 * Renders a unary predicate (no bound value): function style "KEYWORD(r.field)" for
 * function criteria, otherwise postfix style "r.field KEYWORD".
 */
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
 * Get condition string for a plain comparison operator (no SQL function), e.g.
 * "r.name = @name0". With ignore-case, both sides are wrapped in UPPER().
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
 * Get condition string for a function-style operator, e.g. "CONTAINS(r.name, @name0)".
 * With ignore-case requested, functions that accept Cosmos DB's third boolean argument
 * get ", true" (case-insensitive match); all others are emulated with UPPER() on both sides.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @param takesCaseSensitiveParam if the function type can take the third boolean param
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
// Third argument "true" = ignore case, per Cosmos DB string-function signatures.
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
}
/**
 * Builds "(r.field BETWEEN @fieldstartN AND @fieldendN)" from the criteria's two
 * subject values and registers both bound values in {@code parameters}.
 */
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
/**
 * Joins two rendered sub-conditions with a binary closure keyword (AND / OR),
 * parenthesizing the result to preserve precedence.
 */
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
/**
 * Builds an IN / NOT IN predicate, e.g. "r.status IN (@p0,@p1)". The single subject
 * value must be a Collection; each element becomes its own parameter named
 * "p" + current size of {@code parameters}.
 */
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
// Allow-list of element types. NOTE(review): the message below is imprecise -
// Boolean is accepted while e.g. Double is not; kept as-is for compatibility.
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
/**
 * Recursively renders a criteria tree into a SQL condition string. Values to bind are
 * appended to {@code parameters}; {@code counter} supplies a unique suffix for the
 * parameter names generated by each leaf criteria.
 */
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
// No filter: the caller omits the WHERE clause for an empty body.
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
// Left child is rendered before the right one so parameter numbering is deterministic.
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
 * Generate the query body (WHERE clause) for interface QuerySpecGenerator. The result
 * pairs the SQL text with its parameters; each parameter is a (name, value) pair.
 *
 * @param query the representation for query method.
 * @param counter source of unique suffixes for generated parameter names.
 * @return a pair of the WHERE clause (empty string when unfiltered) and its parameters.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
/**
 * Renders one ORDER BY expression for the given sort order, e.g.
 * {@code r.lastName DESC}. Ignore-case ordering is rejected because the
 * generator does not support it.
 *
 * @param order the sort order to render.
 * @return the rendered ordering expression.
 */
private static String getParameter(@NonNull Sort.Order order) {
    Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
    if (order.isDescending()) {
        return "r." + order.getProperty() + " DESC";
    }
    return "r." + order.getProperty() + " ASC";
}
/**
 * Renders the ORDER BY clause for the given sort, or an empty string when the
 * sort is unsorted.
 *
 * @param sort the sort specification to render.
 * @return the ORDER BY clause, or {@code ""} when there is nothing to sort by.
 */
static String generateQuerySort(@NonNull Sort sort) {
    if (sort.isUnsorted()) {
        return "";
    }
    final String orderings = sort.stream()
        .map(AbstractQueryGenerator::getParameter)
        .collect(Collectors.joining(","));
    return "ORDER BY " + orderings;
}
/**
 * Assembles the trailing clauses of the query. Currently this is only the
 * ORDER BY clause; an empty string is returned when no tail clause applies.
 *
 * @param query the representation for query method.
 * @return the query tail, possibly empty.
 */
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
    final String sortClause = generateQuerySort(query.getSort());
    // Mirror the original blank-filtering: an empty sort contributes nothing.
    return StringUtils.hasText(sortClause) ? sortClause : "";
}
/**
 * Generates a Cosmos query.
 * <p>
 * The final statement is assembled as {@code <head> <where-body> <order-by>},
 * and an {@code OFFSET 0 LIMIT n} suffix is appended when the query carries a
 * positive limit.
 *
 * @param query the representation for query method.
 * @param queryHead the query head.
 * @return the SQL query spec.
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
                                           @NonNull String queryHead) {
    final AtomicInteger parameterIndex = new AtomicInteger();
    final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, parameterIndex);
    String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
    // Each generated (name, value) pair becomes a named SQL parameter ("@name").
    final List<SqlParameter> sqlParameters = new ArrayList<>();
    for (final Pair<String, Object> parameter : queryBody.getSecond()) {
        sqlParameters.add(new SqlParameter("@" + parameter.getFirst(), toCosmosDbValue(parameter.getSecond())));
    }
    if (query.getLimit() > 0) {
        // A limited query is always anchored at offset zero.
        queryString = queryString + " OFFSET 0 LIMIT " + query.getLimit();
    }
    return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
/**
 * Initialization. Protected because this class is only meant to be used
 * through its query-generator subclasses; the generator itself keeps no state.
 */
protected AbstractQueryGenerator() {
}
/**
 * Derives a SQL parameter name from a document field: characters that are not
 * letters or digits are replaced by underscores, and the counter is appended
 * to keep names unique within one query.
 *
 * @param subject the field name the parameter is generated for.
 * @param counter suffix that disambiguates repeated subjects.
 * @return a sanitized, unique parameter name.
 */
private String generateQueryParameter(@NonNull String subject, int counter) {
    final String sanitized = subject.replaceAll("[^a-zA-Z\\d]", "_");
    return sanitized + counter;
}
/**
 * Renders a predicate for an operator that takes no bound value, either as a
 * function call ({@code IS_NULL(r.field)}) or as a postfix keyword
 * ({@code r.field = false}-style expressions).
 *
 * @param criteria the unary criteria to render; must carry no subject values.
 * @return the rendered predicate.
 */
private String generateUnaryQuery(@NonNull Criteria criteria) {
    Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
    Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
    final String sqlKeyword = criteria.getType().getSqlKeyword();
    final String subject = criteria.getSubject();
    if (CriteriaType.isFunction(criteria.getType())) {
        return String.format("%s(r.%s)", sqlKeyword, subject);
    }
    return String.format("r.%s %s", subject, sqlKeyword);
}
/**
 * Builds an infix (operator-based, non-function) predicate such as
 * {@code r.name = @name}. When case is ignored, both sides are wrapped in
 * {@code UPPER(...)}.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
                            final String subject, final String parameter) {
    final boolean caseSensitive = Part.IgnoreCaseType.NEVER == ignoreCase;
    return caseSensitive
        ? String.format("r.%s %s @%s", subject, sqlKeyword, parameter)
        : String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
/**
 * Builds a predicate that invokes a SQL function, e.g.
 * {@code STARTSWITH(r.name, @name)}. Case-insensitive matching is expressed
 * either via the function's third boolean argument (when supported) or by
 * upper-casing both operands.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @param takesCaseSensitiveParam if the function type can take the third boolean param
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
                                    final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
    if (Part.IgnoreCaseType.NEVER == ignoreCase) {
        return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
    }
    if (takesCaseSensitiveParam) {
        // e.g. STARTSWITH(r.name, @name, true) — third argument means ignore case.
        return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
    }
    return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
/**
 * Renders a BETWEEN predicate, e.g. {@code (r.age BETWEEN @agestart0 AND
 * @ageend0)}, binding both range endpoints as named parameters.
 *
 * @param criteria the BETWEEN criteria carrying exactly two subject values.
 * @param parameters output list that receives the two bound endpoints.
 * @param counter counter used to keep the generated parameter names unique.
 * @return the rendered predicate.
 */
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
    final String subject = criteria.getSubject();
    final String lowerParameter = generateQueryParameter(subject + "start", counter);
    final String upperParameter = generateQueryParameter(subject + "end", counter);
    parameters.add(Pair.of(lowerParameter, toCosmosDbValue(criteria.getSubjectValues().get(0))));
    parameters.add(Pair.of(upperParameter, toCosmosDbValue(criteria.getSubjectValues().get(1))));
    return String.format("(r.%s %s @%s AND @%s)", subject, criteria.getType().getSqlKeyword(),
        lowerParameter, upperParameter);
}
/**
 * Combines two already-rendered predicates with a logical connective, e.g.
 * {@code (left AND right)}.
 *
 * @param left the left-hand predicate.
 * @param right the right-hand predicate.
 * @param type the connective; must be a binary, closed criteria type.
 * @return the parenthesized combined predicate.
 */
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
    Assert.isTrue(CriteriaType.isClosed(type) && CriteriaType.isBinary(type),
        "Criteria type should be binary and closure operation");
    return "(" + left + " " + type.getSqlKeyword() + " " + right + ")";
}
/**
 * Renders an IN / NOT IN predicate, e.g. {@code r.status IN (@p0,@p1)}. Each
 * element of the collection is bound as its own parameter named after its
 * position in {@code parameters}.
 *
 * @param criteria the IN criteria whose single subject value is a Collection.
 * @param parameters output list that receives one pair per collection element.
 * @return the rendered predicate.
 * @throws IllegalQueryException when the value is not a Collection or an
 *         element has an unsupported type.
 */
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
    Assert.isTrue(criteria.getSubjectValues().size() == 1,
        "Criteria should have only one subject value");
    if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
        throw new IllegalQueryException("IN keyword requires Collection type in parameters");
    }
    final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
    final List<String> placeholders = new ArrayList<>();
    for (final Object value : values) {
        // NOTE(review): Boolean is accepted here although the error message below
        // only mentions Number and String — confirm which is intended.
        final boolean supported = value instanceof String || value instanceof Integer
            || value instanceof Long || value instanceof Boolean;
        if (!supported) {
            throw new IllegalQueryException("IN keyword Range only support Number and String type.");
        }
        final String parameterName = "p" + parameters.size();
        placeholders.add("@" + parameterName);
        parameters.add(Pair.of(parameterName, value));
    }
    return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
        String.join(",", placeholders));
}
/**
 * Recursively turns one {@link Criteria} node (and its children) into the SQL
 * predicate fragment used after WHERE; bound values are appended to
 * {@code parameters} as (name, value) pairs.
 *
 * @param criteria the criteria node to render.
 * @param parameters collector for the parameters referenced by the fragment.
 * @param counter shared counter making parameter names unique tree-wide.
 * @return the predicate fragment; empty for {@code ALL}.
 * @throws UnsupportedOperationException for unmapped criteria types.
 */
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
// "Match everything" renders as an empty predicate.
return "";
case IN:
case NOT_IN:
// IN lists number their own parameters, so the counter is not consumed.
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
// Value-less operators.
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
// Single-value operators.
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
// Connectives: render both sub-trees, then combine.
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
 * Generate a query body for interface QuerySpecGenerator. The query body is
 * composed of a SQL WHERE clause (empty when the query has no criteria to
 * apply) and its parameters, organized as a list of pairs of parameter name
 * and value.
 *
 * @param query the representation for query method.
 * @param counter shared counter used to keep generated parameter names unique.
 * @return A pair tuple compose of Sql query.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
// Only prefix WHERE when the criteria produced a non-empty predicate.
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
/**
 * Renders one ORDER BY expression, e.g. {@code r.name DESC}. Ignore-case
 * ordering is rejected because the generator does not support it.
 */
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
/**
 * Renders the ORDER BY clause for the given sort; returns an empty string
 * when the sort is unsorted.
 */
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
/**
 * Assembles the trailing clauses of the query (currently only ORDER BY);
 * blank clauses are filtered out, so the result may be empty.
 */
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
 * Generates a Cosmos query.
 * <p>
 * The statement is assembled as {@code <head> <where-body> <order-by>}, and an
 * {@code OFFSET 0 LIMIT n} suffix is appended when the query carries a
 * positive limit.
 *
 * @param query the representation for query method.
 * @param queryHead the query head.
 * @return the SQL query spec.
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
// Each generated (name, value) pair becomes a named SQL parameter ("@name").
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} |
So isFunction() returns true for IS_NULL and IS_NOT_NULL which do not support the third true parameter in "%s(UPPER(r.%s), UPPER(@%s), true)" For that reason, we also want to make sure !isUnary() that way we do not set takesCaseSensitiveParam to true for IS_NULL and IS_NOT_NULL calls as that would cause an error. | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType()) && ! CriteriaType.isUnary(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter, false);
} else if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | if (CriteriaType.isFunction(criteria.getType()) && ! CriteriaType.isUnary(criteria.getType())) { | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue instanceof String) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUALS.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | class AbstractQueryGenerator {
/**
* Initialization
*/
protected AbstractQueryGenerator() {
}
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
 * Builds a predicate that invokes a SQL function, e.g.
 * {@code STARTSWITH(r.name, @name)}.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @param takesCaseSensitiveParam if the function type can take the third boolean param
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
// NOTE(review): this branch both upper-cases the operands AND passes the
// case-insensitivity flag "true"; the flag looks redundant here, and a
// function that does not accept a third argument would fail — confirm intent.
return String.format("%s(UPPER(r.%s), UPPER(@%s), true)", sqlKeyword, subject, parameter);
}
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
* Generates a Cosmos query.
*
* @param query the representation for query method.
* @param queryHead the query head.
* @return the SQL query spec.
*/
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
/**
* Initialization
*/
protected AbstractQueryGenerator() {
}
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @param takesCaseSensitiveParam if the function type can take the third boolean param
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
* Generates a Cosmos query.
*
* @param query the representation for query method.
* @param queryHead the query head.
* @return the SQL query spec.
*/
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} |
Three things: - I think you need to check subjectValue not parameter - right now you are just checking whether the parameter name follows the regex? - For efficiency reason you might want to declare a static final field with the compiled Regex - to avoid compiling it every time - the fact that the parameter name vs. actual value didn't fail any of the tests means there is a gap. For integration tests I like your tests. But I think it is appropriate to add unit tests for just this method - to assert that UPPER is only used in the output when appropriate etc. Easiest way to get protection from future regressions that could functionally still be correct (like right now) - but incorrectly fall back to the inefficient path. | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if(criteria.getType() == CriteriaType.IS_EQUAL &&
ignoreCase != Part.IgnoreCaseType.NEVER &&
!Pattern.compile("-?\\d+(\\.\\d+)?").matcher(parameter).matches()) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUAL.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | !Pattern.compile("-?\\d+(\\.\\d+)?").matcher(parameter).matches()) { | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue instanceof String) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUALS.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | class AbstractQueryGenerator {
/**
* Initialization
*/
protected AbstractQueryGenerator() {
}
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @param takesCaseSensitiveParam if the function type can take the third boolean param
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
* Generates a Cosmos query.
*
* @param query the representation for query method.
* @param queryHead the query head.
* @return the SQL query spec.
*/
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
/**
* Initialization
*/
protected AbstractQueryGenerator() {
}
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @param takesCaseSensitiveParam if the function type can take the third boolean param
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
* Generates a Cosmos query.
*
* @param query the representation for query method.
* @param queryHead the query head.
* @return the SQL query spec.
*/
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} |
1. Fixed 2. Since using subjectValue, it is an object so achieved without regex 3. Added tests | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if(criteria.getType() == CriteriaType.IS_EQUAL &&
ignoreCase != Part.IgnoreCaseType.NEVER &&
!Pattern.compile("-?\\d+(\\.\\d+)?").matcher(parameter).matches()) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUAL.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | !Pattern.compile("-?\\d+(\\.\\d+)?").matcher(parameter).matches()) { | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue instanceof String) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUALS.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | class AbstractQueryGenerator {
/**
* Initialization
*/
protected AbstractQueryGenerator() {
}
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @param takesCaseSensitiveParam if the function type can take the third boolean param
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
* Generates a Cosmos query.
*
* @param query the representation for query method.
* @param queryHead the query head.
* @return the SQL query spec.
*/
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
/**
* Initialization
*/
protected AbstractQueryGenerator() {
}
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @param takesCaseSensitiveParam if the function type can take the third boolean param
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
* Generates a Cosmos query.
*
* @param query the representation for query method.
* @param queryHead the query head.
* @return the SQL query spec.
*/
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} |
Good catch | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue.getClass().getTypeName().equals("java.lang.String")) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUAL.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUAL.getSqlKeyword(), | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue instanceof String) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUALS.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | class AbstractQueryGenerator {
/**
* Initialization
*/
protected AbstractQueryGenerator() {
}
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @param takesCaseSensitiveParam if the function type can take the third boolean param
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
* Generates a Cosmos query.
*
* @param query the representation for query method.
* @param queryHead the query head.
* @return the SQL query spec.
*/
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
/**
* Initialization
*/
protected AbstractQueryGenerator() {
}
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @param takesCaseSensitiveParam if the function type can take the third boolean param
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
* Generates a Cosmos query.
*
* @param query the representation for query method.
* @param queryHead the query head.
* @return the SQL query spec.
*/
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} |
Fixed | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue.getClass().getTypeName().equals("java.lang.String")) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUAL.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUAL.getSqlKeyword(), | private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter,
CriteriaType.isFunctionWithCaseSensitiveSupport(criteria.getType()));
} else if (criteria.getType() == CriteriaType.IS_EQUAL
&& ignoreCase != Part.IgnoreCaseType.NEVER
&& subjectValue instanceof String) {
return getFunctionCondition(ignoreCase, CriteriaType.STRING_EQUALS.getSqlKeyword(),
subject, parameter, true);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
} | class AbstractQueryGenerator {
/**
* Initialization
*/
protected AbstractQueryGenerator() {
}
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @param takesCaseSensitiveParam if the function type can take the third boolean param
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
if (takesCaseSensitiveParam) {
return String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
* Generates a Cosmos query.
*
* @param query the representation for query method.
* @param queryHead the query head.
* @return the SQL query spec.
*/
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
/**
* Initialization
*/
protected AbstractQueryGenerator() {
}
private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
}
/**
 * Renders a unary criteria (one with no bound values) as a SQL fragment:
 * function types become {@code KEYWORD(r.subject)}, all other unary types
 * become {@code r.subject KEYWORD}.
 *
 * @param criteria the unary criteria to render.
 * @return the rendered SQL fragment.
 */
private String generateUnaryQuery(@NonNull Criteria criteria) {
    Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
    Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
    final CriteriaType type = criteria.getType();
    final String keyword = type.getSqlKeyword();
    final String subject = criteria.getSubject();
    return CriteriaType.isFunction(type)
        ? String.format("%s(r.%s)", keyword, subject)
        : String.format("r.%s %s", subject, keyword);
}
/**
 * Get condition string with function.
 * <p>
 * When the query ignores case, both the column and the bound parameter are
 * wrapped in {@code UPPER(...)} so the comparison is case-insensitive.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
                            final String subject, final String parameter) {
    final boolean caseSensitive = Part.IgnoreCaseType.NEVER == ignoreCase;
    final String template = caseSensitive ? "r.%s %s @%s" : "UPPER(r.%s) %s UPPER(@%s)";
    return String.format(template, subject, sqlKeyword, parameter);
}
/**
 * Get condition string without function.
 * <p>
 * Case-insensitive matching is expressed either through the function's own
 * third boolean argument (when supported) or by wrapping both operands in
 * {@code UPPER(...)}.
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @param takesCaseSensitiveParam if the function type can take the third boolean param
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
                                    final String subject, final String parameter, final boolean takesCaseSensitiveParam) {
    if (Part.IgnoreCaseType.NEVER == ignoreCase) {
        return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
    }
    return takesCaseSensitiveParam
        ? String.format("%s(r.%s, @%s, true)", sqlKeyword, subject, parameter)
        : String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
/**
 * Renders a BETWEEN criteria as {@code (r.subject BETWEEN @p1 AND @p2)} and
 * registers both bound values in {@code parameters}.
 *
 * @param criteria the BETWEEN criteria carrying exactly two subject values.
 * @param parameters the accumulator the two bound (name, value) pairs are appended to.
 * @param counter uniqueness suffix shared by both generated parameter names.
 * @return the rendered SQL fragment.
 */
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
    final String subject = criteria.getSubject();
    final Object lowerValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
    final Object upperValue = toCosmosDbValue(criteria.getSubjectValues().get(1));
    // Distinct "start"/"end" suffixes keep the two bound parameter names apart.
    final String lowerParameter = generateQueryParameter(subject + "start", counter);
    final String upperParameter = generateQueryParameter(subject + "end", counter);
    parameters.add(Pair.of(lowerParameter, lowerValue));
    parameters.add(Pair.of(upperParameter, upperValue));
    return String.format("(r.%s %s @%s AND @%s)", subject, criteria.getType().getSqlKeyword(), lowerParameter, upperParameter);
}
/**
 * Combines two already-rendered sub-conditions with a binary closure operator
 * (AND/OR), parenthesizing the result so operator precedence is preserved.
 *
 * @param left the rendered left-hand condition.
 * @param right the rendered right-hand condition.
 * @param type the closure operator; must be both closed and binary.
 * @return {@code (left KEYWORD right)}.
 */
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
    Assert.isTrue(CriteriaType.isClosed(type)
        && CriteriaType.isBinary(type),
        "Criteria type should be binary and closure operation");
    return String.format("(%s %s %s)", left, type.getSqlKeyword(), right);
}
/**
 * Renders an IN / NOT IN criteria as {@code r.subject IN (@p0,@p1,...)} and
 * binds every collection element as a named parameter.
 * <p>
 * Only {@code String}, {@code Integer}, {@code Long} and {@code Boolean}
 * elements are accepted; anything else is rejected.
 *
 * @param criteria the IN/NOT_IN criteria whose single subject value is a Collection.
 * @param parameters accumulator the bound (name, value) pairs are appended to.
 * @return the rendered SQL fragment.
 * @throws IllegalQueryException if the subject value is not a Collection or
 *     contains an unsupported element type.
 */
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
    Assert.isTrue(criteria.getSubjectValues().size() == 1,
        "Criteria should have only one subject value");
    if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
        throw new IllegalQueryException("IN keyword requires Collection type in parameters");
    }
    final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
    final List<String> paras = new ArrayList<>();
    for (Object o : values) {
        if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
            // Key off the running parameter count so names stay unique across the query.
            String key = "p" + parameters.size();
            paras.add("@" + key);
            parameters.add(Pair.of(key, o));
        } else {
            // Message matches the whitelist above (Boolean is allowed; floating point is not).
            throw new IllegalQueryException("IN keyword only supports String, Integer, Long and Boolean types.");
        }
    }
    return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
        String.join(",", paras));
}
/**
 * Recursively renders a criteria tree into a SQL condition fragment, appending
 * every bound value to {@code parameters}. The shared {@code counter} keeps
 * generated parameter names unique across the whole tree.
 *
 * @param criteria the (possibly composite) criteria node to render.
 * @param parameters accumulator for bound (name, value) pairs.
 * @param counter per-query running counter for parameter-name suffixes.
 * @return the rendered SQL fragment; empty string for the ALL criteria.
 * @throws UnsupportedOperationException for criteria types with no rendering rule.
 */
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
    final CriteriaType type = criteria.getType();
    switch (type) {
        case ALL:
            // No filtering: contributes nothing to the WHERE clause.
            return "";
        case IN:
        case NOT_IN:
            return generateInQuery(criteria, parameters);
        case BETWEEN:
            return generateBetween(criteria, parameters, counter.getAndIncrement());
        case IS_NULL:
        case IS_NOT_NULL:
        case FALSE:
        case TRUE:
            // Unary operators carry no bound values.
            return generateUnaryQuery(criteria);
        case IS_EQUAL:
        case NOT:
        case BEFORE:
        case AFTER:
        case LESS_THAN:
        case LESS_THAN_EQUAL:
        case GREATER_THAN:
        case GREATER_THAN_EQUAL:
        case CONTAINING:
        case ENDS_WITH:
        case STARTS_WITH:
        case ARRAY_CONTAINS:
            return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
        case AND:
        case OR:
            Assert.isTrue(criteria.getSubCriteria().size() == 2,
                "criteria should have two SubCriteria");
            // Recurse into both children before combining; the shared counter
            // guarantees unique parameter names across both subtrees.
            final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
            final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
            return generateClosedQuery(left, right, type);
        default:
            throw new UnsupportedOperationException("unsupported Criteria type: "
                + type);
    }
}
/**
 * Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
 * parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
 *
 * @param query the representation for query method.
 * @param counter per-query running counter for unique parameter names.
 * @return A pair tuple compose of Sql query.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
    final List<Pair<String, Object>> parameters = new ArrayList<>();
    final String condition = this.generateQueryBody(query.getCriteria(), parameters, counter);
    // Prefix WHERE only when the criteria actually produced a condition.
    final String whereClause = StringUtils.hasText(condition) ? "WHERE " + condition : condition;
    return Pair.of(whereClause, parameters);
}
// Renders one Sort.Order as "r.<property> ASC|DESC"; case-insensitive ordering is rejected.
private static String getParameter(@NonNull Sort.Order order) {
    Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
    final String direction = order.isDescending() ? "DESC" : "ASC";
    return String.format("r.%s %s", order.getProperty(), direction);
}
/**
 * Builds the {@code ORDER BY} clause for the given sort, or the empty string
 * when the query is unsorted.
 *
 * @param sort the Spring Data sort specification.
 * @return the full ORDER BY clause, or {@code ""} for an unsorted query.
 */
static String generateQuerySort(@NonNull Sort sort) {
    if (sort.isUnsorted()) {
        return "";
    }
    final String queryTail = "ORDER BY";
    // Render each order term, then join them into a comma-separated list.
    final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
    return queryTail
        + " "
        + String.join(",", subjects);
}
/**
 * Produces the trailing portion of the query (currently only the ORDER BY
 * clause); blank entries are filtered out, so an unsorted query yields "".
 *
 * @param query the query whose sort is rendered.
 * @return the query tail, never {@code null}.
 */
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
    final List<String> queryTails = new ArrayList<>();
    queryTails.add(generateQuerySort(query.getSort()));
    return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
/**
 * Generates a Cosmos query.
 * <p>
 * Joins the supplied head, the WHERE body built from the query's criteria and
 * the ORDER BY tail, converts every bound value into a named SqlParameter, and
 * appends {@code OFFSET 0 LIMIT n} when the query carries a positive limit.
 *
 * @param query the representation for query method.
 * @param queryHead the query head.
 * @return the SQL query spec.
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
                                           @NonNull String queryHead) {
    final AtomicInteger counter = new AtomicInteger();
    final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
    String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
    final List<Pair<String, Object>> parameters = queryBody.getSecond();
    // Each bound (name, value) pair becomes a Cosmos SqlParameter named "@name".
    List<SqlParameter> sqlParameters = parameters.stream()
        .map(p -> new SqlParameter("@" + p.getFirst(),
            toCosmosDbValue(p.getSecond())))
        .collect(Collectors.toList());
    if (query.getLimit() > 0) {
        queryString = new StringBuilder(queryString)
            .append(" OFFSET 0 LIMIT ")
            .append(query.getLimit()).toString();
    }
    return new SqlQuerySpec(queryString, sqlParameters);
}
} |
We'll want to make sure the chunk is fully packed, or the InputStream ended, before creating another chunk. Right now this suffers from an amplification attack if a SequenceInputStream with 2000 1 byte InputStreams (relatively cheap, only a few MB in heap size) causes this to create 2000 * 8MB chunks likely OOME'ing | private static InputStreamContent readAndBuffer(InputStream inputStream, Long length) {
try {
Vector<ByteArrayInputStream> chunkInputStreams = new Vector<>();
int chunkSize = INITIAL_BUFFER_CHUNK_SIZE;
if (length != null) {
chunkSize = (int) Math.min(MAX_BUFFER_CHUNK_SIZE, length);
}
int read;
long totalRead = 0;
long actualLength = length != null ? length : Long.MAX_VALUE;
do {
byte[] chunk = new byte[chunkSize];
read = inputStream.read(chunk);
if (read > 0) {
chunkInputStreams.add(new ByteArrayInputStream(chunk, 0, read));
totalRead += read;
int nextChunkSizeCandidate = 2 * chunkSize;
if (nextChunkSizeCandidate <= actualLength - totalRead
&& nextChunkSizeCandidate <= MAX_BUFFER_CHUNK_SIZE) {
chunkSize = nextChunkSizeCandidate;
}
}
} while (read >= 0);
return new InputStreamContent(
() -> {
for (ByteArrayInputStream chunkInputStream : chunkInputStreams) {
chunkInputStream.reset();
}
return new SequenceInputStream(chunkInputStreams.elements());
}, length, true);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
} | if (read > 0) { | private static InputStreamContent readAndBuffer(InputStream inputStream, Long length) {
try {
List<ByteBuffer> byteBuffers = StreamUtil.readStreamToListOfByteBuffers(
inputStream, length, INITIAL_BUFFER_CHUNK_SIZE, MAX_BUFFER_CHUNK_SIZE);
return new InputStreamContent(
() -> new IterableOfByteBuffersInputStream(byteBuffers),
length, true);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
} | class InputStreamContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class);
private static final int INITIAL_BUFFER_CHUNK_SIZE = 8 * 1024;
private static final int MAX_BUFFER_CHUNK_SIZE = 8 * 1024 * 1024;
private static final int MAX_ARRAY_LENGTH = Integer.MAX_VALUE - 8;
private final Supplier<InputStream> content;
private final Long length;
private final AtomicReference<byte[]> bytes = new AtomicReference<>();
private final boolean isReplayable;
/**
 * Creates an instance of {@link InputStreamContent}.
 *
 * @param inputStream The inputStream that is used as the content for this instance.
 * @param length The length of the stream content in bytes, or {@code null} when unknown.
 * @throws NullPointerException if {@code inputStream} is null.
 */
public InputStreamContent(InputStream inputStream, Long length) {
    Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
    this.content = () -> inputStream;
    this.length = length;
    // A raw, caller-supplied stream can only be consumed once.
    isReplayable = false;
}
/**
 * Internal constructor used by the replayable factories: the supplier is
 * expected to hand back a re-readable stream on every call when
 * {@code isReplayable} is {@code true}.
 */
private InputStreamContent(Supplier<InputStream> inputStreamSupplier, Long length, boolean isReplayable) {
    this.content = Objects.requireNonNull(inputStreamSupplier, "'inputStreamSupplier' cannot be null.");
    this.length = length;
    this.isReplayable = isReplayable;
}
/**
 * Gets the length of the content in bytes.
 * <p>
 * Once the stream has been buffered by {@link #toBytes()}, the buffered size is
 * authoritative; otherwise the length supplied at construction (possibly
 * {@code null}) is returned.
 *
 * @return the content length in bytes, or {@code null} if unknown.
 */
@Override
public Long getLength() {
    // Read the atomic reference once so the null check and the length
    // computation are guaranteed to observe the same array (the original
    // re-read bytes.get() for every use).
    final byte[] data = bytes.get();
    if (data != null) {
        return (long) data.length;
    }
    return length;
}
/**
 * Returns the content decoded as a UTF-8 string.
 * <p>
 * Note: this buffers the whole underlying stream via {@link #toBytes()}.
 */
@Override
public String toString() {
    return new String(toBytes(), StandardCharsets.UTF_8);
}
/**
 * Returns the content as a byte array, reading and caching the underlying
 * stream on first use.
 * <p>
 * Subsequent calls return the cached array; at most one result is ever
 * published even under concurrent first calls.
 *
 * @return the buffered content bytes.
 */
@Override
public byte[] toBytes() {
    byte[] data = this.bytes.get();
    if (data == null) {
        byte[] readData = getBytes();
        // Publish at most one buffered copy: if another thread won the race,
        // keep its result rather than overwriting it with a second read of a
        // stream that may no longer be at its start (the original's set()
        // could clobber the winner's array).
        data = this.bytes.compareAndSet(null, readData) ? readData : this.bytes.get();
    }
    return data;
}
/**
 * Deserializes the buffered content into an object of the requested type
 * using the supplied serializer.
 */
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
    return serializer.deserializeFromBytes(toBytes(), typeReference);
}
/**
 * Returns the content as an InputStream.
 * <p>
 * For a non-replayable instance this hands back the original, single-use stream.
 */
@Override
public InputStream toStream() {
    return this.content.get();
}
/**
 * Returns the buffered content wrapped as a read-only ByteBuffer.
 * <p>
 * The buffer is read-only, but it shares the cached array from
 * {@link #toBytes()} rather than copying it.
 */
@Override
public ByteBuffer toByteBuffer() {
    return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
/**
 * Streams the content as a Flux of ByteBuffers read in STREAM_READ_SIZE chunks.
 */
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
    return FluxUtil.toFluxByteBuffer(this.content.get(), STREAM_READ_SIZE);
}
/**
 * Whether this content can be consumed more than once; set at construction.
 */
@Override
public boolean isReplayable() {
    return isReplayable;
}
/**
 * Converts this content into a replayable form.
 * <p>
 * An already-replayable instance is returned as-is. Otherwise, streams that
 * support mark/reset (with a known, array-sized length) are wrapped via
 * mark/reset; anything else is fully read and buffered in memory.
 *
 * @return a replayable {@link BinaryDataContent}.
 */
@Override
public BinaryDataContent toReplayableContent() {
    if (isReplayable) {
        return this;
    }
    final InputStream stream = this.content.get();
    return canMarkReset(stream, length)
        ? createMarkResetContent(stream, length)
        : readAndBuffer(stream, length);
}
/**
 * Asynchronously converts this content into a replayable form.
 * <p>
 * Mirrors {@link #toReplayableContent()}: mark/reset-capable streams are
 * wrapped cheaply, while other streams are fully buffered — the buffering is
 * blocking I/O, so it is deferred and shifted onto the bounded-elastic
 * scheduler.
 *
 * @return a Mono emitting a replayable {@link BinaryDataContent}.
 */
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
    if (isReplayable) {
        return Mono.just(this);
    }
    InputStream inputStream = this.content.get();
    if (canMarkReset(inputStream, length)) {
        return Mono.fromCallable(() -> createMarkResetContent(inputStream, length));
    }
    // fromCallable(...).subscribeOn(boundedElastic()) is the idiomatic way to
    // wrap a blocking call; the original Mono.just(...).publishOn(...).map(ignore -> ...)
    // discarded its mapped element and obscured the intent.
    return Mono.fromCallable(() -> readAndBuffer(inputStream, length))
        .subscribeOn(Schedulers.boundedElastic());
}
// A stream qualifies for mark/reset replay only when its length is known,
// fits the maximum array size (mark() takes an int read limit), and the
// stream itself advertises mark support.
private static boolean canMarkReset(InputStream inputStream, Long length) {
    return length != null && length < MAX_ARRAY_LENGTH && inputStream.markSupported();
}
/**
 * Wraps a mark/reset-capable stream as replayable content: the current
 * position is marked once, and every replay resets the stream back to it.
 * IOException on reset is surfaced as an UncheckedIOException.
 */
private static InputStreamContent createMarkResetContent(InputStream inputStream, Long length) {
    inputStream.mark(length.intValue());
    final Supplier<InputStream> replaySupplier = () -> {
        try {
            inputStream.reset();
        } catch (IOException e) {
            throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
        }
        return inputStream;
    };
    return new InputStreamContent(replaySupplier, length, true);
}
/**
 * Drains the underlying stream into a byte array, reading in
 * STREAM_READ_SIZE chunks; IOExceptions are rethrown unchecked.
 */
private byte[] getBytes() {
    final InputStream source = this.content.get();
    final ByteArrayOutputStream accumulator = new ByteArrayOutputStream();
    final byte[] buffer = new byte[STREAM_READ_SIZE];
    try {
        int count;
        while ((count = source.read(buffer, 0, buffer.length)) != -1) {
            accumulator.write(buffer, 0, count);
        }
    } catch (IOException ex) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
    }
    return accumulator.toByteArray();
}
} | class InputStreamContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class);
private static final int INITIAL_BUFFER_CHUNK_SIZE = 8 * 1024;
private static final int MAX_BUFFER_CHUNK_SIZE = 8 * 1024 * 1024;
private static final int MAX_ARRAY_LENGTH = Integer.MAX_VALUE - 8;
private final Supplier<InputStream> content;
private final Long length;
private final AtomicReference<byte[]> bytes = new AtomicReference<>();
private final boolean isReplayable;
/**
* Creates an instance of {@link InputStreamContent}.
*
* @param inputStream The inputStream that is used as the content for this instance.
* @throws NullPointerException if {@code content} is null.
*/
public InputStreamContent(InputStream inputStream, Long length) {
Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
this.content = () -> inputStream;
this.length = length;
isReplayable = false;
}
private InputStreamContent(Supplier<InputStream> inputStreamSupplier, Long length, boolean isReplayable) {
this.content = Objects.requireNonNull(inputStreamSupplier, "'inputStreamSupplier' cannot be null.");
this.length = length;
this.isReplayable = isReplayable;
}
@Override
public Long getLength() {
if (bytes.get() != null) {
return (long) bytes.get().length;
}
return length;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
byte[] data = this.bytes.get();
if (data == null) {
bytes.set(getBytes());
data = this.bytes.get();
}
return data;
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return this.content.get();
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
return FluxUtil.toFluxByteBuffer(this.content.get(), STREAM_READ_SIZE);
}
@Override
public boolean isReplayable() {
return isReplayable;
}
@Override
public BinaryDataContent toReplayableContent() {
if (isReplayable) {
return this;
}
InputStream inputStream = this.content.get();
if (canMarkReset(inputStream, length)) {
return createMarkResetContent(inputStream, length);
} else {
return readAndBuffer(inputStream, length);
}
}
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
if (isReplayable) {
return Mono.just(this);
}
InputStream inputStream = this.content.get();
if (canMarkReset(inputStream, length)) {
return Mono.fromCallable(() -> createMarkResetContent(inputStream, length));
}
return Mono.just(inputStream)
.publishOn(Schedulers.boundedElastic())
.map(is -> readAndBuffer(is, length));
}
private static boolean canMarkReset(InputStream inputStream, Long length) {
return length != null && length < MAX_ARRAY_LENGTH && inputStream.markSupported();
}
private static InputStreamContent createMarkResetContent(InputStream inputStream, Long length) {
inputStream.mark(length.intValue());
return new InputStreamContent(
() -> {
try {
inputStream.reset();
return inputStream;
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}, length, true
);
}
private byte[] getBytes() {
try {
ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream();
int nRead;
byte[] data = new byte[STREAM_READ_SIZE];
InputStream inputStream = this.content.get();
while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
dataOutputBuffer.write(data, 0, nRead);
}
return dataOutputBuffer.toByteArray();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
} |
`assertEquals(String expected, String actual)`, the expected value should be the first parameter. | public void testAppConfigurationOperation() {
LOGGER.info("AppConfigurationIT begin.");
client.addConfigurationSetting(sampleKey, sampleLabel, sampleValue);
ConfigurationSetting configurationSetting = client.getConfigurationSetting(sampleKey, sampleLabel);
Assertions.assertEquals(configurationSetting.getValue(), sampleValue);
LOGGER.info("AppConfigurationIT end.");
} | Assertions.assertEquals(configurationSetting.getValue(), sampleValue); | public void testAppConfigurationOperation() {
LOGGER.info("AppConfigurationIT begin.");
client.addConfigurationSetting(SAMPLE_KEY, SAMPLE_LABEL, SAMPLE_VALUE);
ConfigurationSetting configurationSetting = client.getConfigurationSetting(SAMPLE_KEY, SAMPLE_LABEL);
Assertions.assertEquals(SAMPLE_VALUE, configurationSetting.getValue());
LOGGER.info("AppConfigurationIT end.");
} | class AppConfigurationIT {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationIT.class);
private final String sampleKey = "sample-key";
private final String sampleLabel = "sample-label";
private final String sampleValue = "sample-value";
@Autowired
private ConfigurationClient client;
@Test
} | class AppConfigurationIT {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationIT.class);
private static final String SAMPLE_KEY = "sample-key";
private static final String SAMPLE_LABEL = "sample-label";
private static final String SAMPLE_VALUE = "sample-value";
@Autowired
private ConfigurationClient client;
@Test
} |
same here | public void testCosmosOperation() {
LOGGER.info("CosmosIT begin.");
User testUser = new User(
"testCosmos",
"testFirstName",
"testLastName",
"test address line two"
);
CosmosContainer container = client.getDatabase(databaseName).getContainer(containerName);
container.createItem(testUser);
CosmosPagedIterable<User> users = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
if (users.stream().iterator().hasNext()) {
User user = users.stream().iterator().next();
Assertions.assertEquals(user.toString(), "testFirstName testLastName, test address line two");
}
container.deleteItem(testUser, new CosmosItemRequestOptions());
CosmosPagedIterable<User> usersDelete = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
Assertions.assertFalse(usersDelete.iterator().hasNext());
LOGGER.info("CosmosIT end.");
} | Assertions.assertEquals(user.toString(), "testFirstName testLastName, test address line two"); | public void testCosmosOperation() {
LOGGER.info("CosmosIT begin.");
User testUser = new User(
"testCosmos",
"testFirstName",
"testLastName",
"test address line two"
);
CosmosContainer container = client.getDatabase(DATABASE_NAME).getContainer(CONTAINER_NAME);
container.createItem(testUser);
CosmosPagedIterable<User> users = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
if (users.stream().iterator().hasNext()) {
User user = users.stream().iterator().next();
Assertions.assertEquals(testUser, user);
}
container.deleteItem(testUser, new CosmosItemRequestOptions());
CosmosPagedIterable<User> usersAfterDeletion = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
Assertions.assertFalse(usersAfterDeletion.iterator().hasNext());
LOGGER.info("CosmosIT end.");
} | class CosmosIT {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosIT.class);
private final String databaseName = "TestDB";
private final String containerName = "Users";
@Autowired
private CosmosClient client;
@Test
} | class CosmosIT {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosIT.class);
private static final String DATABASE_NAME = "TestDB";
private static final String CONTAINER_NAME = "Users";
@Autowired
private CosmosClient client;
@Test
} |
bettter call it `usersAfterDeletion` | public void testCosmosOperation() {
LOGGER.info("CosmosIT begin.");
User testUser = new User(
"testCosmos",
"testFirstName",
"testLastName",
"test address line two"
);
CosmosContainer container = client.getDatabase(databaseName).getContainer(containerName);
container.createItem(testUser);
CosmosPagedIterable<User> users = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
if (users.stream().iterator().hasNext()) {
User user = users.stream().iterator().next();
Assertions.assertEquals(user.toString(), "testFirstName testLastName, test address line two");
}
container.deleteItem(testUser, new CosmosItemRequestOptions());
CosmosPagedIterable<User> usersDelete = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
Assertions.assertFalse(usersDelete.iterator().hasNext());
LOGGER.info("CosmosIT end.");
} | CosmosPagedIterable<User> usersDelete = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'", | public void testCosmosOperation() {
LOGGER.info("CosmosIT begin.");
User testUser = new User(
"testCosmos",
"testFirstName",
"testLastName",
"test address line two"
);
CosmosContainer container = client.getDatabase(DATABASE_NAME).getContainer(CONTAINER_NAME);
container.createItem(testUser);
CosmosPagedIterable<User> users = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
if (users.stream().iterator().hasNext()) {
User user = users.stream().iterator().next();
Assertions.assertEquals(testUser, user);
}
container.deleteItem(testUser, new CosmosItemRequestOptions());
CosmosPagedIterable<User> usersAfterDeletion = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
Assertions.assertFalse(usersAfterDeletion.iterator().hasNext());
LOGGER.info("CosmosIT end.");
} | class CosmosIT {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosIT.class);
private final String databaseName = "TestDB";
private final String containerName = "Users";
@Autowired
private CosmosClient client;
@Test
} | class CosmosIT {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosIT.class);
private static final String DATABASE_NAME = "TestDB";
private static final String CONTAINER_NAME = "Users";
@Autowired
private CosmosClient client;
@Test
} |
Add `equals` and `hashcode` to the `User` class, and here we can call `Assertions.assertEquals(testUser, user)` | public void testCosmosOperation() {
LOGGER.info("CosmosIT begin.");
User testUser = new User(
"testCosmos",
"testFirstName",
"testLastName",
"test address line two"
);
CosmosContainer container = client.getDatabase(databaseName).getContainer(containerName);
container.createItem(testUser);
CosmosPagedIterable<User> users = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
if (users.stream().iterator().hasNext()) {
User user = users.stream().iterator().next();
Assertions.assertEquals(user.toString(), "testFirstName testLastName, test address line two");
}
container.deleteItem(testUser, new CosmosItemRequestOptions());
CosmosPagedIterable<User> usersDelete = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
Assertions.assertFalse(usersDelete.iterator().hasNext());
LOGGER.info("CosmosIT end.");
} | Assertions.assertEquals(user.toString(), "testFirstName testLastName, test address line two"); | public void testCosmosOperation() {
LOGGER.info("CosmosIT begin.");
User testUser = new User(
"testCosmos",
"testFirstName",
"testLastName",
"test address line two"
);
CosmosContainer container = client.getDatabase(DATABASE_NAME).getContainer(CONTAINER_NAME);
container.createItem(testUser);
CosmosPagedIterable<User> users = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
if (users.stream().iterator().hasNext()) {
User user = users.stream().iterator().next();
Assertions.assertEquals(testUser, user);
}
container.deleteItem(testUser, new CosmosItemRequestOptions());
CosmosPagedIterable<User> usersAfterDeletion = container.queryItems("SELECT * FROM c WHERE c.id = 'testCosmos'",
new CosmosQueryRequestOptions(),
User.class);
Assertions.assertFalse(usersAfterDeletion.iterator().hasNext());
LOGGER.info("CosmosIT end.");
} | class CosmosIT {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosIT.class);
private final String databaseName = "TestDB";
private final String containerName = "Users";
@Autowired
private CosmosClient client;
@Test
} | class CosmosIT {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosIT.class);
private static final String DATABASE_NAME = "TestDB";
private static final String CONTAINER_NAME = "Users";
@Autowired
private CosmosClient client;
@Test
} |
If you are using `block` here, you can use non-reactive repository instead of the `ReactiveCosmosRepository`. So we can remove the `block()` call here. | public void testSpringDataCosmosOperation() {
LOGGER.info("SpringDataCosmosIT begin.");
User testUser = new User(userId, "testFirstName", "testLastName", "test address line one");
userRepository.save(testUser).block();
User user = userRepository.findById(userId).block();
Assertions.assertEquals(user.toString(), "testFirstName testLastName, test address line one");
userRepository.delete(testUser).block();
Assertions.assertNull(userRepository.findById(userId).block());
LOGGER.info("SpringDataCosmosIT end.");
} | userRepository.save(testUser).block(); | public void testSpringDataCosmosOperation() {
LOGGER.info("SpringDataCosmosIT begin.");
User testUser = new User(USER_ID, "testFirstName", "testLastName", "test address line one");
userRepository.save(testUser);
Optional<User> user = userRepository.findById(USER_ID);
Assertions.assertEquals(testUser, user.get());
userRepository.delete(testUser);
Assertions.assertFalse(userRepository.findById(USER_ID).isPresent());
LOGGER.info("SpringDataCosmosIT end.");
} | class SpringDataCosmosIT {
private static final Logger LOGGER = LoggerFactory.getLogger(SpringDataCosmosIT.class);
private final String userId = "testSpringDataCosmos";
@Autowired
private UserRepository userRepository;
@Test
} | class SpringDataCosmosIT {
private static final Logger LOGGER = LoggerFactory.getLogger(SpringDataCosmosIT.class);
private static final String USER_ID = "testSpringDataCosmos";
@Autowired
private UserRepository userRepository;
@Test
} |
same here | public void testSpringDataCosmosOperation() {
LOGGER.info("SpringDataCosmosIT begin.");
User testUser = new User(userId, "testFirstName", "testLastName", "test address line one");
userRepository.save(testUser).block();
User user = userRepository.findById(userId).block();
Assertions.assertEquals(user.toString(), "testFirstName testLastName, test address line one");
userRepository.delete(testUser).block();
Assertions.assertNull(userRepository.findById(userId).block());
LOGGER.info("SpringDataCosmosIT end.");
} | Assertions.assertEquals(user.toString(), "testFirstName testLastName, test address line one"); | public void testSpringDataCosmosOperation() {
LOGGER.info("SpringDataCosmosIT begin.");
User testUser = new User(USER_ID, "testFirstName", "testLastName", "test address line one");
userRepository.save(testUser);
Optional<User> user = userRepository.findById(USER_ID);
Assertions.assertEquals(testUser, user.get());
userRepository.delete(testUser);
Assertions.assertFalse(userRepository.findById(USER_ID).isPresent());
LOGGER.info("SpringDataCosmosIT end.");
} | class SpringDataCosmosIT {
private static final Logger LOGGER = LoggerFactory.getLogger(SpringDataCosmosIT.class);
private final String userId = "testSpringDataCosmos";
@Autowired
private UserRepository userRepository;
@Test
} | class SpringDataCosmosIT {
private static final Logger LOGGER = LoggerFactory.getLogger(SpringDataCosmosIT.class);
private static final String USER_ID = "testSpringDataCosmos";
@Autowired
private UserRepository userRepository;
@Test
} |
Or using this `await(xxx)` | void testSendAndReceiveMessage() throws InterruptedException {
LOGGER.info("EventHubBinderManualModeIT begin.");
EventHubsBinderManualModeIT.LATCH.await(15, TimeUnit.SECONDS);
LOGGER.info("Send a message:" + MESSAGE + ".");
many.emitNext(new GenericMessage<>(MESSAGE), Sinks.EmitFailureHandler.FAIL_FAST);
assertThat(EventHubsBinderManualModeIT.LATCH.await(30, TimeUnit.SECONDS)).isTrue();
LOGGER.info("EventHubBinderManualModeIT end.");
} | EventHubsBinderManualModeIT.LATCH.await(15, TimeUnit.SECONDS); | void testSendAndReceiveMessage() throws InterruptedException {
LOGGER.info("EventHubBinderManualModeIT begin.");
EventHubsBinderManualModeIT.LATCH.await(15, TimeUnit.SECONDS);
LOGGER.info("Send a message:" + MESSAGE + ".");
many.emitNext(new GenericMessage<>(MESSAGE), Sinks.EmitFailureHandler.FAIL_FAST);
assertThat(EventHubsBinderManualModeIT.LATCH.await(30, TimeUnit.SECONDS)).isTrue();
LOGGER.info("EventHubBinderManualModeIT end.");
} | class TestConfig {
@Bean
Sinks.Many<Message<String>> many() {
return Sinks.many().unicast().onBackpressureBuffer();
}
@Bean
Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) {
return () -> many.asFlux()
.doOnNext(m -> LOGGER.info("Manually sending message {}", m))
.doOnError(t -> LOGGER.error("Error encountered", t));
}
@Bean
Consumer<Message<String>> consume() {
return message -> {
LOGGER.info("EventHubBinderManualModeIT: New message received: '{}'", message.getPayload());
if (message.getPayload().equals(EventHubsBinderManualModeIT.MESSAGE)) {
Checkpointer checkpointer = (Checkpointer) message.getHeaders().get(AzureHeaders.CHECKPOINTER);
checkpointer.success().handle((r, ex) -> {
Assertions.assertNull(ex);
});
LATCH.countDown();
}
};
}
} | class TestConfig {
@Bean
Sinks.Many<Message<String>> many() {
return Sinks.many().unicast().onBackpressureBuffer();
}
@Bean
Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) {
return () -> many.asFlux()
.doOnNext(m -> LOGGER.info("Manually sending message {}", m))
.doOnError(t -> LOGGER.error("Error encountered", t));
}
@Bean
Consumer<Message<String>> consume() {
return message -> {
LOGGER.info("EventHubBinderManualModeIT: New message received: '{}'", message.getPayload());
if (message.getPayload().equals(EventHubsBinderManualModeIT.MESSAGE)) {
Checkpointer checkpointer = (Checkpointer) message.getHeaders().get(AzureHeaders.CHECKPOINTER);
checkpointer.success().handle((r, ex) -> {
Assertions.assertNull(ex);
});
LATCH.countDown();
}
};
}
} |
same here | public void testKeyVaultSecretOperation() {
LOGGER.info("KeyVaultSecretIT begin.");
client.setSecret(name, value);
KeyVaultSecret secret = client.getSecret(name);
Assertions.assertEquals(secret.getValue(), value);
LOGGER.info("KeyVaultSecretIT end.");
} | Assertions.assertEquals(secret.getValue(), value); | public void testKeyVaultSecretOperation() {
LOGGER.info("KeyVaultSecretIT begin.");
client.setSecret(NAME, VALUE);
KeyVaultSecret secret = client.getSecret(NAME);
Assertions.assertEquals(VALUE, secret.getValue());
LOGGER.info("KeyVaultSecretIT end.");
} | class KeyVaultSecretIT {
private static final Logger LOGGER = LoggerFactory.getLogger(KeyVaultSecretIT.class);
private final String name = "sample-key";
private final String value = "sample-value";
@Autowired
private SecretClient client;
@Test
} | class KeyVaultSecretIT {
private static final Logger LOGGER = LoggerFactory.getLogger(KeyVaultSecretIT.class);
private static final String NAME = "sample-key";
private static final String VALUE = "sample-value";
@Autowired
private SecretClient client;
@Test
} |
await or await with timeout? | public void testServiceBusOperation() throws InterruptedException {
LOGGER.info("ServiceBusIT begin.");
senderClient.sendMessage(new ServiceBusMessage(DATA1));
senderClient.sendMessage(new ServiceBusMessage(DATA2));
senderClient.close();
IterableStream<ServiceBusReceivedMessage> receivedMessages = receiverClient.receiveMessages(1);
if (receivedMessages.stream().iterator().hasNext()) {
ServiceBusReceivedMessage message = receivedMessages.stream().iterator().next();
Assertions.assertEquals(DATA1, message.getBody().toString());
receiverClient.complete(message);
}
processorClient.start();
Assertions.assertTrue(processorClient.isRunning());
LATCH.await();
Assertions.assertEquals(DATA2, MESSAGE);
processorClient.close();
Assertions.assertFalse(processorClient.isRunning());
LOGGER.info("ServiceBusIT end.");
} | LATCH.await(); | public void testServiceBusOperation() throws InterruptedException {
LOGGER.info("ServiceBusIT begin.");
senderClient.sendMessage(new ServiceBusMessage(DATA1));
IterableStream<ServiceBusReceivedMessage> receivedMessages = receiverClient.receiveMessages(1);
if (receivedMessages.stream().iterator().hasNext()) {
ServiceBusReceivedMessage message = receivedMessages.stream().iterator().next();
Assertions.assertEquals(DATA1, message.getBody().toString());
receiverClient.complete(message);
}
processorClient.start();
senderClient.sendMessage(new ServiceBusMessage(DATA2));
senderClient.close();
Assertions.assertTrue(processorClient.isRunning());
LATCH.await(15, TimeUnit.SECONDS);
Assertions.assertEquals(DATA2, MESSAGE);
processorClient.close();
Assertions.assertFalse(processorClient.isRunning());
LOGGER.info("ServiceBusIT end.");
} | class TestConfig {
@Bean
ServiceBusRecordMessageListener messageListener() {
return message -> {
MESSAGE = message.getMessage().getBody().toString();
LATCH.countDown();
};
}
@Bean
ServiceBusErrorHandler errorHandler() {
return errorContext -> {
};
}
} | class TestConfig {
@Bean
ServiceBusRecordMessageListener messageListener() {
return message -> {
MESSAGE = message.getMessage().getBody().toString();
LATCH.countDown();
};
}
@Bean
ServiceBusErrorHandler errorHandler() {
return errorContext -> {
};
}
} |
same here | public void testServiceBusJmsOperation() throws InterruptedException {
LOGGER.info("ServiceBusJmsIT begin.");
jmsTemplate.convertAndSend(QUEUE_NAME, data);
LOGGER.info("Send message: {}", data);
String msg = EXCHANGER.exchange(null);
Assertions.assertEquals(msg, data);
LOGGER.info("ServiceBusJmsIT end.");
} | Assertions.assertEquals(msg, data); | public void testServiceBusJmsOperation() throws InterruptedException {
LOGGER.info("ServiceBusJmsIT begin.");
jmsTemplate.convertAndSend(QUEUE_NAME, DATA);
LOGGER.info("Send message: {}", DATA);
String msg = EXCHANGER.exchange(null);
Assertions.assertEquals(DATA, msg);
LOGGER.info("ServiceBusJmsIT end.");
} | class ServiceBusJmsIT {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusJmsIT.class);
private final String data = "service bus jms test";
private final String QUEUE_NAME = "que001";
private final Exchanger<String> EXCHANGER = new Exchanger<>();
@Autowired
private JmsTemplate jmsTemplate;
@Test
@JmsListener(destination = QUEUE_NAME, containerFactory = "jmsListenerContainerFactory")
public void receiveQueueMessage(String message) throws InterruptedException {
LOGGER.info("Received message from queue: {}", message);
EXCHANGER.exchange(message);
}
} | class ServiceBusJmsIT {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusJmsIT.class);
private static final String DATA = "service bus jms test";
private static final String QUEUE_NAME = "que001";
private final Exchanger<String> EXCHANGER = new Exchanger<>();
@Autowired
private JmsTemplate jmsTemplate;
@Test
@Timeout(70)
@JmsListener(destination = QUEUE_NAME, containerFactory = "jmsListenerContainerFactory")
public void receiveQueueMessage(String message) throws InterruptedException {
LOGGER.info("Received message from queue: {}", message);
EXCHANGER.exchange(message);
}
} |
Just to be consistent, can we use static import here as well and remove `Wiremock`. | public static void beforeClass() {
server = new WireMockServer(WireMockConfiguration.options()
.dynamicPort()
.disableRequestJournal()
.gzipDisabled(true));
server.stubFor(get("/short").willReturn(aResponse().withBody(SHORT_BODY)));
server.stubFor(get("/long").willReturn(aResponse().withBody(LONG_BODY)));
server.stubFor(get("/error").willReturn(aResponse().withBody("error").withStatus(500)));
server.stubFor(WireMock.post("/shortPost").willReturn(aResponse().withBody(SHORT_BODY)));
server.stubFor(get("/connectionClose").willReturn(aResponse().withFault(Fault.RANDOM_DATA_THEN_CLOSE)));
server.start();
} | server.stubFor(WireMock.post("/shortPost").willReturn(aResponse().withBody(SHORT_BODY))); | public static void beforeClass() {
server = new WireMockServer(WireMockConfiguration.options()
.dynamicPort()
.disableRequestJournal()
.gzipDisabled(true));
server.stubFor(get("/short").willReturn(aResponse().withBody(SHORT_BODY)));
server.stubFor(get("/long").willReturn(aResponse().withBody(LONG_BODY)));
server.stubFor(get("/error").willReturn(aResponse().withBody("error").withStatus(500)));
server.stubFor(post("/shortPost").willReturn(aResponse().withBody(SHORT_BODY)));
server.stubFor(get("/connectionClose").willReturn(aResponse().withFault(Fault.RANDOM_DATA_THEN_CLOSE)));
server.start();
} | class JdkAsyncHttpClientTests {
private static final byte[] SHORT_BODY = "hi there".getBytes(StandardCharsets.UTF_8);
private static final byte[] LONG_BODY = createLongBody();
private static WireMockServer server;
@BeforeAll
@AfterAll
public static void afterClass() {
if (server != null) {
server.shutdown();
}
}
@Test
public void testFlowableResponseShortBodyAsByteArrayAsync() {
checkBodyReceived(SHORT_BODY, "/short");
}
@Test
public void testFlowableResponseLongBodyAsByteArrayAsync() {
checkBodyReceived(LONG_BODY, "/long");
}
@Test
public void testMultipleSubscriptionsEmitsError() {
Mono<byte[]> response = getResponse("/short").cache().flatMap(HttpResponse::getBodyAsByteArray);
StepVerifier.create(response)
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(Duration.ofSeconds(20));
StepVerifier.create(response)
.expectNextCount(0)
.expectError(IllegalStateException.class)
.verify(Duration.ofSeconds(20));
}
@Test
public void testFlowableWhenServerReturnsBodyAndNoErrorsWhenHttp500Returned() {
StepVerifier.create(getResponse("/error")
.flatMap(response -> {
assertEquals(500, response.getStatusCode());
return response.getBodyAsString();
}))
.expectNext("error")
.expectComplete()
.verify(Duration.ofSeconds(20));
}
@Test
public void testFlowableBackpressure() {
StepVerifierOptions stepVerifierOptions = StepVerifierOptions.create();
stepVerifierOptions.initialRequest(0);
StepVerifier.create(getResponse("/long").flatMapMany(HttpResponse::getBody), stepVerifierOptions)
.expectNextCount(0)
.thenRequest(1)
.expectNextCount(1)
.thenRequest(3)
.expectNextCount(3)
.thenRequest(Long.MAX_VALUE)
.thenConsumeWhile(ByteBuffer::hasRemaining)
.verifyComplete();
}
@Test
public void testRequestBodyIsErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request = new HttpRequest(HttpMethod.POST, url(server, "/shortPost"))
.setHeader("Content-Length", "123")
.setBody(Flux.error(new RuntimeException("boo")));
StepVerifier.create(client.send(request))
.expectErrorMessage("boo")
.verify();
}
@Test
public void testRequestBodyEndsInErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
String contentChunk = "abcdefgh";
int repetitions = 1000;
HttpRequest request = new HttpRequest(HttpMethod.POST, url(server, "/shortPost"))
.setHeader("Content-Length", String.valueOf(contentChunk.length() * (repetitions + 1)))
.setBody(Flux.just(contentChunk)
.repeat(repetitions)
.map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
.concatWith(Flux.error(new RuntimeException("boo"))));
try {
StepVerifier.create(client.send(request))
.expectErrorMessage("boo")
.verify(Duration.ofSeconds(10));
} catch (Exception ex) {
assertEquals("boo", ex.getMessage());
}
}
@Test
public void testServerShutsDownSocketShouldPushErrorToContentFlowable() {
HttpClient client = new JdkAsyncHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url(server, "/connectionClose"));
StepVerifier.create(client.send(request).flatMap(HttpResponse::getBodyAsByteArray))
.verifyError(IOException.class);
}
@Test
public void testConcurrentRequests() {
int numRequests = 100;
HttpClient client = new JdkHttpClientProvider().createInstance();
Mono<Long> numBytesMono = Flux.range(1, numRequests)
.parallel(25)
.runOn(Schedulers.boundedElastic())
.flatMap(ignored -> getResponse(client, "/long")
.flatMapMany(HttpResponse::getBodyAsByteArray)
.doOnNext(bytes -> assertArrayEquals(LONG_BODY, bytes)))
.sequential()
.map(buffer -> (long) buffer.length)
.reduce(0L, Long::sum);
StepVerifier.create(numBytesMono)
.expectNext((long) numRequests * LONG_BODY.length)
.expectComplete()
.verify(Duration.ofSeconds(60));
}
private static Mono<HttpResponse> getResponse(String path) {
HttpClient client = new JdkAsyncHttpClientBuilder().build();
return getResponse(client, path);
}
private static Mono<HttpResponse> getResponse(HttpClient client, String path) {
HttpRequest request = new HttpRequest(HttpMethod.GET, url(server, path));
return client.send(request);
}
private static URL url(WireMockServer server, String path) {
try {
return new URL("http:
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private static byte[] createLongBody() {
byte[] duplicateBytes = "abcdefghijk".getBytes(StandardCharsets.UTF_8);
byte[] longBody = new byte[duplicateBytes.length * 100000];
for (int i = 0; i < 100000; i++) {
System.arraycopy(duplicateBytes, 0, longBody, i * duplicateBytes.length, duplicateBytes.length);
}
return longBody;
}
private void checkBodyReceived(byte[] expectedBody, String path) {
HttpClient client = new JdkAsyncHttpClientBuilder().build();
StepVerifier.create(doRequest(client, path).flatMap(HttpResponse::getBodyAsByteArray))
.assertNext(bytes -> Assertions.assertArrayEquals(expectedBody, bytes))
.verifyComplete();
}
private Mono<HttpResponse> doRequest(HttpClient client, String path) {
HttpRequest request = new HttpRequest(HttpMethod.GET, url(server, path));
return client.send(request);
}
} | class JdkAsyncHttpClientTests {
private static final byte[] SHORT_BODY = "hi there".getBytes(StandardCharsets.UTF_8);
private static final byte[] LONG_BODY = createLongBody();
private static final StepVerifierOptions EMPTY_INITIAL_REQUEST_OPTIONS = StepVerifierOptions.create()
.initialRequest(0);
private static WireMockServer server;
@BeforeAll
@AfterAll
public static void afterClass() {
if (server != null) {
server.shutdown();
}
}
@Test
public void testFlowableResponseShortBodyAsByteArrayAsync() {
checkBodyReceived(SHORT_BODY, "/short");
}
@Test
public void testFlowableResponseLongBodyAsByteArrayAsync() {
checkBodyReceived(LONG_BODY, "/long");
}
@Test
public void testMultipleSubscriptionsEmitsError() {
Mono<byte[]> response = getResponse("/short").cache().flatMap(HttpResponse::getBodyAsByteArray);
StepVerifier.create(response)
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(Duration.ofSeconds(20));
StepVerifier.create(response)
.expectNextCount(0)
.expectError(IllegalStateException.class)
.verify(Duration.ofSeconds(20));
}
@Test
public void testFlowableWhenServerReturnsBodyAndNoErrorsWhenHttp500Returned() {
StepVerifier.create(getResponse("/error")
.flatMap(response -> {
assertEquals(500, response.getStatusCode());
return response.getBodyAsString();
}))
.expectNext("error")
.expectComplete()
.verify(Duration.ofSeconds(20));
}
@Test
public void testFlowableBackpressure() {
StepVerifier.create(getResponse("/long").flatMapMany(HttpResponse::getBody), EMPTY_INITIAL_REQUEST_OPTIONS)
.expectNextCount(0)
.thenRequest(1)
.expectNextCount(1)
.thenRequest(3)
.expectNextCount(3)
.thenRequest(Long.MAX_VALUE)
.thenConsumeWhile(ByteBuffer::hasRemaining)
.verifyComplete();
}
@Test
public void testRequestBodyIsErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request = new HttpRequest(HttpMethod.POST, url(server, "/shortPost"))
.setHeader("Content-Length", "123")
.setBody(Flux.error(new RuntimeException("boo")));
StepVerifier.create(client.send(request))
.expectErrorMessage("boo")
.verify();
}
@Test
public void testRequestBodyEndsInErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
String contentChunk = "abcdefgh";
int repetitions = 1000;
HttpRequest request = new HttpRequest(HttpMethod.POST, url(server, "/shortPost"))
.setHeader("Content-Length", String.valueOf(contentChunk.length() * (repetitions + 1)))
.setBody(Flux.just(contentChunk)
.repeat(repetitions)
.map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
.concatWith(Flux.error(new RuntimeException("boo"))));
try {
StepVerifier.create(client.send(request))
.expectErrorMessage("boo")
.verify(Duration.ofSeconds(10));
} catch (Exception ex) {
assertEquals("boo", ex.getMessage());
}
}
@Test
public void testServerShutsDownSocketShouldPushErrorToContentFlowable() {
HttpClient client = new JdkAsyncHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url(server, "/connectionClose"));
StepVerifier.create(client.send(request).flatMap(HttpResponse::getBodyAsByteArray))
.verifyError(IOException.class);
}
@Test
public void testConcurrentRequests() {
int numRequests = 100;
HttpClient client = new JdkHttpClientProvider().createInstance();
Mono<Long> numBytesMono = Flux.range(1, numRequests)
.parallel(25)
.runOn(Schedulers.boundedElastic())
.flatMap(ignored -> getResponse(client, "/long")
.flatMapMany(HttpResponse::getBodyAsByteArray)
.doOnNext(bytes -> assertArrayEquals(LONG_BODY, bytes)))
.sequential()
.map(buffer -> (long) buffer.length)
.reduce(0L, Long::sum);
StepVerifier.create(numBytesMono)
.expectNext((long) numRequests * LONG_BODY.length)
.expectComplete()
.verify(Duration.ofSeconds(60));
}
private static Mono<HttpResponse> getResponse(String path) {
HttpClient client = new JdkAsyncHttpClientBuilder().build();
return getResponse(client, path);
}
private static Mono<HttpResponse> getResponse(HttpClient client, String path) {
HttpRequest request = new HttpRequest(HttpMethod.GET, url(server, path));
return client.send(request);
}
private static URL url(WireMockServer server, String path) {
try {
return new URL("http:
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private static byte[] createLongBody() {
byte[] duplicateBytes = "abcdefghijk".getBytes(StandardCharsets.UTF_8);
byte[] longBody = new byte[duplicateBytes.length * 100000];
for (int i = 0; i < 100000; i++) {
System.arraycopy(duplicateBytes, 0, longBody, i * duplicateBytes.length, duplicateBytes.length);
}
return longBody;
}
private void checkBodyReceived(byte[] expectedBody, String path) {
HttpClient client = new JdkAsyncHttpClientBuilder().build();
StepVerifier.create(doRequest(client, path).flatMap(HttpResponse::getBodyAsByteArray))
.assertNext(bytes -> Assertions.assertArrayEquals(expectedBody, bytes))
.verifyComplete();
}
private Mono<HttpResponse> doRequest(HttpClient client, String path) {
HttpRequest request = new HttpRequest(HttpMethod.GET, url(server, path));
return client.send(request);
}
} |
nit: This should say a timeout exception will occur if the blob create container call takes longer than 3 seconds to complete as this could not throw if it took 2 seconds | public static void main(String[] args) throws IOException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account blob service URL endpoint.
* The URL typically looks like this:
*/
String endpoint = String.format(Locale.ROOT, "https:
/*
* Set up a HttpPipelinePolicy that sets duration timeout per call of 5 seconds.
*/
HttpPipelinePolicy mockPolicy = new HttpPipelinePolicy() {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return next.process().delayElement(Duration.ofSeconds(5L));
}
public HttpPipelinePosition getPipelinePosition() {
return HttpPipelinePosition.PER_CALL;
}
};
/*
* Create a BlobServiceClient object that wraps the service endpoint, credential, policy with
* timeout per call, and a request pipeline.
*/
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.endpoint(endpoint)
.credential(credential)
.addPolicy(mockPolicy)
.buildClient();
/*
* This example shows how to pass timeouts in call operations.
*/
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
BlobContainerClient blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 3 seconds, below the timeout duration
* passed in the policy. This will trigger a timeout exception.
*/
try {
blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(3L), Context.NONE);
} catch (Exception ex) {
System.out.println("Creation failed due to timeout: " + ex.getMessage());
}
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 10 seconds, greater the timeout duration
* passed in the policy. This will succeed.
*/
Response<Boolean> response = blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(10L), Context.NONE);
if (response.getValue()) {
System.out.println("Blob container successfully created.");
}
/*
* Delete the container we created earlier.
*/
blobContainerClient.delete();
} | * passed in the policy. This will trigger a timeout exception. | public static void main(String[] args) throws IOException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account blob service URL endpoint.
* The URL typically looks like this:
*/
String endpoint = String.format(Locale.ROOT, "https:
/*
* Create a BlobServiceClient object that wraps the service endpoint, credential, policy with
* timeout per call, and a request pipeline.
* Note: this is not necessary to implement timeouts. This is only here to allow the sample to be independently
* runnable and demonstrate behavior.
*/
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.endpoint(endpoint)
.credential(credential)
.addPolicy(new TimeoutPolicy())
.buildClient();
/*
* This example shows how to pass timeouts in call operations.
*/
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
BlobContainerClient blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 3 seconds. A timeout exception will
* occur if the blob create container call takes longer than 3 seconds to complete as this could not throw if
* it took 2 seconds.
*/
try {
blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(3L), Context.NONE);
} catch (Exception ex) {
System.out.println("Creation failed due to timeout: " + ex.getMessage());
}
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 10 seconds, greater the timeout duration
* passed in the policy. This will succeed.
*/
Response<Boolean> response = blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(10L), Context.NONE);
if (response.getValue()) {
System.out.println("Blob container successfully created.");
}
/*
* Delete the container we created earlier.
*/
blobContainerClient.delete();
} | class OperationalLevelTimeoutExample {
/**
* Entry point into the basic examples for Storage blobs.
*
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws RuntimeException If the downloaded data doesn't match the uploaded data
*/
} | class OperationalLevelTimeoutExample {
/**
* Entry point into the basic examples for Storage blobs.
*
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws RuntimeException If the downloaded data doesn't match the uploaded data
*/
/**
* A simple policy that sets duration timeout per call of 5 seconds.
*/
static class TimeoutPolicy implements HttpPipelinePolicy {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return next.process().delayElement(Duration.ofSeconds(5L));
}
@Override
public HttpPipelinePosition getPipelinePosition() {
return HttpPipelinePosition.PER_CALL;
}
}
} |
The mock policy should probably be hidden at the bottom in a helper method so it doesn't confuse folks just trying to learn about timeouts. | public static void main(String[] args) throws IOException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account blob service URL endpoint.
* The URL typically looks like this:
*/
String endpoint = String.format(Locale.ROOT, "https:
/*
* Set up a HttpPipelinePolicy that sets duration timeout per call of 5 seconds.
*/
HttpPipelinePolicy mockPolicy = new HttpPipelinePolicy() {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return next.process().delayElement(Duration.ofSeconds(5L));
}
public HttpPipelinePosition getPipelinePosition() {
return HttpPipelinePosition.PER_CALL;
}
};
/*
* Create a BlobServiceClient object that wraps the service endpoint, credential, policy with
* timeout per call, and a request pipeline.
*/
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.endpoint(endpoint)
.credential(credential)
.addPolicy(mockPolicy)
.buildClient();
/*
* This example shows how to pass timeouts in call operations.
*/
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
BlobContainerClient blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* A timeout exception will occur if the blob create container call takes longer than 3 seconds to complete.
*/
try {
blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(3L), Context.NONE);
} catch (Exception ex) {
System.out.println("Creation failed due to timeout: " + ex.getMessage());
}
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 10 seconds, greater the timeout duration
* passed in the policy. This will succeed.
*/
Response<Boolean> response = blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(10L), Context.NONE);
if (response.getValue()) {
System.out.println("Blob container successfully created.");
}
/*
* Delete the container we created earlier.
*/
blobContainerClient.delete();
} | HttpPipelinePolicy mockPolicy = new HttpPipelinePolicy() { | public static void main(String[] args) throws IOException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account blob service URL endpoint.
* The URL typically looks like this:
*/
String endpoint = String.format(Locale.ROOT, "https:
/*
* Create a BlobServiceClient object that wraps the service endpoint, credential, policy with
* timeout per call, and a request pipeline.
* Note: this is not necessary to implement timeouts. This is only here to allow the sample to be independently
* runnable and demonstrate behavior.
*/
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.endpoint(endpoint)
.credential(credential)
.addPolicy(new TimeoutPolicy())
.buildClient();
/*
* This example shows how to pass timeouts in call operations.
*/
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
BlobContainerClient blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 3 seconds. A timeout exception will
* occur if the blob create container call takes longer than 3 seconds to complete as this could not throw if
* it took 2 seconds.
*/
try {
blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(3L), Context.NONE);
} catch (Exception ex) {
System.out.println("Creation failed due to timeout: " + ex.getMessage());
}
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 10 seconds, greater the timeout duration
* passed in the policy. This will succeed.
*/
Response<Boolean> response = blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(10L), Context.NONE);
if (response.getValue()) {
System.out.println("Blob container successfully created.");
}
/*
* Delete the container we created earlier.
*/
blobContainerClient.delete();
} | class OperationalLevelTimeoutExample {
/**
* Entry point into the basic examples for Storage blobs.
*
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws RuntimeException If the downloaded data doesn't match the uploaded data
*/
} | class OperationalLevelTimeoutExample {
/**
* Entry point into the basic examples for Storage blobs.
*
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws RuntimeException If the downloaded data doesn't match the uploaded data
*/
/**
* A simple policy that sets duration timeout per call of 5 seconds.
*/
/**
 * Demonstration-only pipeline policy: holds every HTTP response back for five
 * seconds so the sample can show client-side operation timeouts firing.
 */
static class TimeoutPolicy implements HttpPipelinePolicy {
    @Override
    public HttpPipelinePosition getPipelinePosition() {
        // Positioned at PER_CALL in the pipeline.
        return HttpPipelinePosition.PER_CALL;
    }

    @Override
    public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
        // Forward the request unchanged, then delay emission of the response.
        Duration simulatedLatency = Duration.ofSeconds(5L);
        return next.process().delayElement(simulatedLatency);
    }
}
} |
"// this is not necessary to implement timeouts. This is only here to allow the sample to be independently runnable and demonstrate behavior" | public static void main(String[] args) throws IOException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account blob service URL endpoint.
* The URL typically looks like this:
*/
String endpoint = String.format(Locale.ROOT, "https:
/*
* Create a BlobServiceClient object that wraps the service endpoint, credential, policy with
* timeout per call, and a request pipeline.
*/
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.endpoint(endpoint)
.credential(credential)
.addPolicy(new TimeoutPolicy())
.buildClient();
/*
* This example shows how to pass timeouts in call operations.
*/
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
BlobContainerClient blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 3 seconds. A timeout exception will
* occur if the blob create container call takes longer than 3 seconds to complete as this could not throw if
* it took 2 seconds.
*/
try {
blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(3L), Context.NONE);
} catch (Exception ex) {
System.out.println("Creation failed due to timeout: " + ex.getMessage());
}
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 10 seconds, greater the timeout duration
* passed in the policy. This will succeed.
*/
Response<Boolean> response = blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(10L), Context.NONE);
if (response.getValue()) {
System.out.println("Blob container successfully created.");
}
/*
* Delete the container we created earlier.
*/
blobContainerClient.delete();
} | .addPolicy(new TimeoutPolicy()) | public static void main(String[] args) throws IOException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account blob service URL endpoint.
* The URL typically looks like this:
*/
String endpoint = String.format(Locale.ROOT, "https:
/*
* Create a BlobServiceClient object that wraps the service endpoint, credential, policy with
* timeout per call, and a request pipeline.
* Note: this is not necessary to implement timeouts. This is only here to allow the sample to be independently
* runnable and demonstrate behavior.
*/
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.endpoint(endpoint)
.credential(credential)
.addPolicy(new TimeoutPolicy())
.buildClient();
/*
* This example shows how to pass timeouts in call operations.
*/
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
BlobContainerClient blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 3 seconds. A timeout exception will
* occur if the blob create container call takes longer than 3 seconds to complete as this could not throw if
* it took 2 seconds.
*/
try {
blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(3L), Context.NONE);
} catch (Exception ex) {
System.out.println("Creation failed due to timeout: " + ex.getMessage());
}
/*
* Create a client that references a to-be-created container in your Azure Storage account. This returns a
* ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient).
* Note that container names require lowercase.
*/
blobContainerClient = storageClient.getBlobContainerClient("myjavacontainerbasic" + System.currentTimeMillis());
/*
* Create a container in Storage blob account with a timeout duration of 10 seconds, greater the timeout duration
* passed in the policy. This will succeed.
*/
Response<Boolean> response = blobContainerClient.createIfNotExistsWithResponse(new BlobContainerCreateOptions(), Duration.ofSeconds(10L), Context.NONE);
if (response.getValue()) {
System.out.println("Blob container successfully created.");
}
/*
* Delete the container we created earlier.
*/
blobContainerClient.delete();
} | class OperationalLevelTimeoutExample {
/**
* Entry point into the basic examples for Storage blobs.
*
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws RuntimeException If the downloaded data doesn't match the uploaded data
*/
/**
* A simple policy that sets duration timeout per call of 5 seconds.
*/
/**
 * A sample {@link HttpPipelinePolicy} that delays every HTTP response by five
 * seconds so the surrounding example can demonstrate operation-level timeouts.
 */
static class TimeoutPolicy implements HttpPipelinePolicy {
    @Override
    public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
        // Pass the request along unchanged, but delay emission of the response by 5 seconds.
        return next.process().delayElement(Duration.ofSeconds(5L));
    }

    @Override
    public HttpPipelinePosition getPipelinePosition() {
        // This policy sits at the PER_CALL position in the pipeline.
        return HttpPipelinePosition.PER_CALL;
    }
}
} | class OperationalLevelTimeoutExample {
/**
* Entry point into the basic examples for Storage blobs.
*
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws RuntimeException If the downloaded data doesn't match the uploaded data
*/
/**
* A simple policy that sets duration timeout per call of 5 seconds.
*/
/**
 * Sample-only policy: every response produced by the rest of the pipeline is
 * held back for five seconds, simulating a slow service call.
 */
static class TimeoutPolicy implements HttpPipelinePolicy {
    @Override
    public HttpPipelinePosition getPipelinePosition() {
        // Runs at the PER_CALL position of the pipeline.
        return HttpPipelinePosition.PER_CALL;
    }

    @Override
    public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
        // Delegate to the next policy and delay the resulting response.
        Duration artificialDelay = Duration.ofSeconds(5L);
        return next.process().delayElement(artificialDelay);
    }
}
} |
Mock policy should go at the bottom to avoid confusion and include the comment in the other file about it only being for demonstration purposes. | public static void main(String[] args) throws IOException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account blob service URL endpoint.
* The URL typically looks like this:
*/
String endpoint = String.format(Locale.ROOT, "https:
/*
Use a Request Retry Policy that has a fixed back-off retry policy.
*/
RequestRetryOptions retryOptions = new RequestRetryOptions(RetryPolicyType.FIXED, 2, 3, 1000L, 1500L, null);
HttpResponse mockHttpResponse = new MockHttpResponse(new HttpRequest(HttpMethod.PUT, new URL("https:
HttpPipelinePolicy mockPolicy = new HttpPipelinePolicy() {
int count = 0;
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
System.out.println("Number of retries: " + ++count);
return Mono.just(mockHttpResponse).delayElement(Duration.ofSeconds(5L));
}
};
/*
* Create a BlobServiceClient object that wraps the service endpoint, credential, retry options, and a request pipeline.
*/
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.endpoint(endpoint)
.credential(credential)
.retryOptions(retryOptions)
.addPolicy(mockPolicy)
.buildClient();
/*
* Make a call on the client to trigger the pipeline policy.
*/
try {
storageClient.getProperties();
} catch (Exception ex) {
if (ex.getCause() instanceof TimeoutException) {
System.out.println("Operation failed due to timeout: " + ex.getMessage());
}
}
} | HttpPipelinePolicy mockPolicy = new HttpPipelinePolicy() { | public static void main(String[] args) throws IOException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account blob service URL endpoint.
* The URL typically looks like this:
*/
String endpoint = String.format(Locale.ROOT, "https:
/*
Use a Request Retry Policy that has a fixed back-off retry policy.
*/
RequestRetryOptions retryOptions = new RequestRetryOptions(RetryPolicyType.FIXED, 2, 3, 1000L, 1500L, null);
HttpResponse mockHttpResponse = new MockHttpResponse(new HttpRequest(HttpMethod.PUT, new URL("https:
/*
* Create a BlobServiceClient object that wraps the service endpoint, credential, retry options, and a request pipeline.
*/
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.endpoint(endpoint)
.credential(credential)
.retryOptions(retryOptions)
.addPolicy(new TimeoutPolicy(mockHttpResponse))
.buildClient();
/*
* Make a call on the client to trigger the pipeline policy.
*/
try {
storageClient.getProperties();
} catch (Exception ex) {
if (ex.getCause() instanceof TimeoutException) {
System.out.println("Operation failed due to timeout: " + ex.getMessage());
}
}
} | class RequestLevelTimeoutExample {
/**
* Entry point into the basic examples for Storage blobs.
*
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws RuntimeException If the downloaded data doesn't match the uploaded data
*/
/**
 * Minimal stand-in {@code HttpResponse} used by the sample: carries only a
 * status code, with empty headers and an empty body.
 */
static class MockHttpResponse extends HttpResponse {
    // Status code to report back to the pipeline.
    int statusCode;

    protected MockHttpResponse(HttpRequest request, int code) {
        super(request);
        this.statusCode = code;
    }

    @Override
    public int getStatusCode() {
        return statusCode;
    }

    @Override
    public String getHeaderValue(String s) {
        // No headers are ever recorded, so every lookup misses.
        return null;
    }

    @Override
    public HttpHeaders getHeaders() {
        // A fresh, empty header collection on every call.
        return new HttpHeaders();
    }

    @Override
    public Flux<ByteBuffer> getBody() {
        // Empty body stream.
        return Flux.empty();
    }

    @Override
    public Mono<byte[]> getBodyAsByteArray() {
        // Empty body as a zero-length array.
        return Mono.just(new byte[0]);
    }

    @Override
    public Mono<String> getBodyAsString() {
        // Empty body as an empty string.
        return Mono.just("");
    }

    @Override
    public Mono<String> getBodyAsString(Charset charset) {
        // Charset is irrelevant for an empty body.
        return Mono.just("");
    }
}
} | class RequestLevelTimeoutExample {
/**
* Entry point into the basic examples for Storage blobs.
*
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws RuntimeException If the downloaded data doesn't match the uploaded data
*/
/**
* A simple policy that sets duration timeout per call of 5 seconds.
*/
/**
 * Sample policy that never forwards the request: it counts how many times it is
 * invoked (printing the running total) and answers with a canned response
 * delayed by five seconds, so retry/timeout behavior can be observed.
 */
static class TimeoutPolicy implements HttpPipelinePolicy {
    HttpResponse mockHttpResponse;
    int count;

    TimeoutPolicy(HttpResponse httpResponse) {
        this.mockHttpResponse = httpResponse;
        this.count = 0;
    }

    @Override
    public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
        // Each invocation corresponds to one attempt; print the running total.
        count++;
        System.out.println("Number of retries: " + count);
        // Short-circuits the pipeline: next.process() is intentionally never called.
        return Mono.just(mockHttpResponse).delayElement(Duration.ofSeconds(5L));
    }

    @Override
    public HttpPipelinePosition getPipelinePosition() {
        return HttpPipelinePosition.PER_CALL;
    }
}
/**
 * Bare-bones {@code HttpResponse} used only by this sample: a fixed status
 * code, no headers, and an empty body in every representation.
 */
static class MockHttpResponse extends HttpResponse {
    // The only piece of state: the status code handed in at construction.
    final int statusCode;

    protected MockHttpResponse(HttpRequest request, int code) {
        super(request);
        this.statusCode = code;
    }

    @Override
    public int getStatusCode() {
        return this.statusCode;
    }

    @Override
    public HttpHeaders getHeaders() {
        // Always a fresh, empty set of headers.
        return new HttpHeaders();
    }

    @Override
    public String getHeaderValue(String s) {
        // There are no headers, so every lookup returns null.
        return null;
    }

    @Override
    public Flux<ByteBuffer> getBody() {
        return Flux.empty();
    }

    @Override
    public Mono<byte[]> getBodyAsByteArray() {
        return Mono.just(new byte[0]);
    }

    @Override
    public Mono<String> getBodyAsString(Charset charset) {
        // Charset makes no difference to an empty body.
        return Mono.just("");
    }

    @Override
    public Mono<String> getBodyAsString() {
        return Mono.just("");
    }
}
} |
```suggestion public BinaryData getBodyAsBinaryData() { Flux<ByteBuffer> body = getBody(); if (body !=null) { return BinaryDataHelper.createBinaryData(new FluxByteBufferContent(body)); } else { return null; } } ``` | public BinaryData getBodyAsBinaryData() {
return BinaryDataHelper.createBinaryData(new FluxByteBufferContent(getBody()));
} | } | public BinaryData getBodyAsBinaryData() {
Flux<ByteBuffer> body = getBody();
if (body != null) {
return BinaryDataHelper.createBinaryData(new FluxByteBufferContent(body));
} else {
return null;
}
} | class HttpResponse implements Closeable {
private static final ClientLogger LOGGER = new ClientLogger(HttpResponse.class);
private final HttpRequest request;
/**
* Creates an instance of {@link HttpResponse}.
*
* @param request The {@link HttpRequest} that resulted in this {@link HttpResponse}.
*/
protected HttpResponse(HttpRequest request) {
this.request = request;
}
/**
* Get the response status code.
*
* @return The response status code
*/
public abstract int getStatusCode();
/**
* Lookup a response header with the provided name.
*
* @param name the name of the header to lookup.
* @return the value of the header, or null if the header doesn't exist in the response.
*/
public abstract String getHeaderValue(String name);
/**
* Get all response headers.
*
* @return the response headers
*/
public abstract HttpHeaders getHeaders();
/**
* Get the publisher emitting response content chunks.
* <p>
* Returns a stream of the response's body content. Emissions may occur on Reactor threads which should not be
* blocked. Blocking should be avoided as much as possible/practical in reactive programming but if you do use
* methods like {@code block()} on the stream then be sure to use {@code publishOn} before the blocking call.
*
* @return The response's content as a stream of {@link ByteBuffer}.
*/
public abstract Flux<ByteBuffer> getBody();
/**
* Gets the {@link BinaryData} that represents the body of the response.
*
* Subclasses should override this method.
*
* @return The {@link BinaryData} response body.
*/
/**
* Gets the response content as a {@code byte[]}.
*
* @return The response content as a {@code byte[]}.
*/
public abstract Mono<byte[]> getBodyAsByteArray();
/**
* Gets the response content as a {@link String}.
* <p>
* By default this method will inspect the response body for containing a byte order mark (BOM) to determine the
* encoding of the string (UTF-8, UTF-16, etc.). If a BOM isn't found this will default to using UTF-8 as the
* encoding, if a specific encoding is required use {@link
*
* @return The response content as a {@link String}.
*/
public abstract Mono<String> getBodyAsString();
/**
* Gets the response content as a {@link String}.
*
* @param charset The {@link Charset} to use as the string encoding.
* @return The response content as a {@link String}.
*/
public abstract Mono<String> getBodyAsString(Charset charset);
/**
* Gets the response content as an {@link InputStream}.
*
* @return The response content as an {@link InputStream}.
*/
public Mono<InputStream> getBodyAsInputStream() {
    // Aggregate the body into a byte array, then expose it as an in-memory stream.
    return getBodyAsByteArray().map(bytes -> new ByteArrayInputStream(bytes));
}
/**
* Gets the {@link HttpRequest request} which resulted in this response.
*
* @return The {@link HttpRequest request} which resulted in this response.
*/
public final HttpRequest getRequest() {
    // Accessor for the request captured by the constructor.
    return request;
}
/**
* Gets a new {@link HttpResponse response} object wrapping this response with its content buffered into memory.
*
* @return A new {@link HttpResponse response} with the content buffered.
*/
public HttpResponse buffer() {
    // Buffering is delegated to the BufferedHttpResponse wrapper around this response.
    return new BufferedHttpResponse(this);
}
/**
* Writes body content to {@link OutputStream}.
* @param outputStream {@link OutputStream}.
* @throws IOException if an I/O error occurs when reading or writing.
*/
public void writeBodyTo(OutputStream outputStream) throws IOException {
    BinaryData body = getBodyAsBinaryData();
    if (body == null) {
        // No body available; nothing to write.
        return;
    }
    // Copy the body's stream into the caller-supplied output stream.
    StreamUtils.INSTANCE.transfer(body.toStream(), outputStream);
}
/**
* Writes body content to {@link AsynchronousFileChannel}.
* @param asynchronousFileChannel {@link AsynchronousFileChannel}.
* @param position The position in the file to begin writing the {@code content}.
* @return A {@link Mono} which emits a completion status once the body content has been written to the {@link
* AsynchronousFileChannel}.
*/
public Mono<Void> writeBodyTo(AsynchronousFileChannel asynchronousFileChannel, long position) {
    // Streams the reactive body directly into the channel, starting at 'position'.
    return FluxUtil.writeFile(getBody(), asynchronousFileChannel, position);
}
/**
* Writes body content to {@link FileChannel}.
* @param fileChannel {@link FileChannel}.
* @param position The position in the file to begin writing the {@code content}.
* @throws IOException if an I/O error occurs when reading or writing.
*/
public void writeBodyTo(FileChannel fileChannel, long position) throws IOException {
    // Synchronous variant: blocks the calling thread until the reactive write finishes.
    // NOTE(review): block() must not run on a reactor event-loop thread — confirm callers.
    FluxUtil.writeFile(getBody(), fileChannel, position).block();
}
/**
* Closes the response content stream, if any.
*/
@Override
public void close() {
    // Default implementation holds no open resources, so there is nothing to release.
}
} | class HttpResponse implements Closeable {
private final HttpRequest request;
/**
* Creates an instance of {@link HttpResponse}.
*
* @param request The {@link HttpRequest} that resulted in this {@link HttpResponse}.
*/
protected HttpResponse(HttpRequest request) {
this.request = request;
}
/**
* Get the response status code.
*
* @return The response status code
*/
public abstract int getStatusCode();
/**
* Lookup a response header with the provided name.
*
* @param name the name of the header to lookup.
* @return the value of the header, or null if the header doesn't exist in the response.
*/
public abstract String getHeaderValue(String name);
/**
* Get all response headers.
*
* @return the response headers
*/
public abstract HttpHeaders getHeaders();
/**
* Get the publisher emitting response content chunks.
* <p>
* Returns a stream of the response's body content. Emissions may occur on Reactor threads which should not be
* blocked. Blocking should be avoided as much as possible/practical in reactive programming but if you do use
* methods like {@code block()} on the stream then be sure to use {@code publishOn} before the blocking call.
*
* @return The response's content as a stream of {@link ByteBuffer}.
*/
public abstract Flux<ByteBuffer> getBody();
/**
* Gets the {@link BinaryData} that represents the body of the response.
*
* Subclasses should override this method.
*
* @return The {@link BinaryData} response body.
*/
/**
* Gets the response content as a {@code byte[]}.
*
* @return The response content as a {@code byte[]}.
*/
public abstract Mono<byte[]> getBodyAsByteArray();
/**
* Gets the response content as a {@link String}.
* <p>
* By default this method will inspect the response body for containing a byte order mark (BOM) to determine the
* encoding of the string (UTF-8, UTF-16, etc.). If a BOM isn't found this will default to using UTF-8 as the
* encoding, if a specific encoding is required use {@link
*
* @return The response content as a {@link String}.
*/
public abstract Mono<String> getBodyAsString();
/**
* Gets the response content as a {@link String}.
*
* @param charset The {@link Charset} to use as the string encoding.
* @return The response content as a {@link String}.
*/
public abstract Mono<String> getBodyAsString(Charset charset);
/**
* Gets the response content as an {@link InputStream}.
*
* @return The response content as an {@link InputStream}.
*/
public Mono<InputStream> getBodyAsInputStream() {
    // Collect the whole body into memory, then wrap it in a ByteArrayInputStream.
    return getBodyAsByteArray().map(ByteArrayInputStream::new);
}
/**
* Gets the {@link HttpRequest request} which resulted in this response.
*
* @return The {@link HttpRequest request} which resulted in this response.
*/
public final HttpRequest getRequest() {
    // Returns the request supplied when this response was constructed.
    return request;
}
/**
* Gets a new {@link HttpResponse response} object wrapping this response with its content buffered into memory.
*
* @return A new {@link HttpResponse response} with the content buffered.
*/
public HttpResponse buffer() {
    // Wrap this response in BufferedHttpResponse, which performs the buffering.
    return new BufferedHttpResponse(this);
}
/**
* Closes the response content stream, if any.
*/
@Override
public void close() {
    // Intentionally empty: the base response owns no closeable resources.
}
} |
Let's not add this to public API surface. (isRestProxySyncProxyEnabled) This switch will be eventually taken away. We should use "magic private string constant" in places we need this. | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
Context context = methodParser.setContext(args);
boolean isReactive = methodParser.isReactive();
boolean isStreamResponseType = methodParser.isStreamResponse();
boolean syncRestProxyEnabled = Contexts.with(context).isRestProxySyncProxyEnabled();
if (isReactive || isStreamResponseType || !syncRestProxyEnabled) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | boolean syncRestProxyEnabled = Contexts.with(context).isRestProxySyncProxyEnabled(); | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
Context context = methodParser.setContext(args);
boolean isReactive = methodParser.isReactive();
boolean isStreamResponseType = methodParser.isStreamResponse();
boolean syncRestProxyEnabled = (boolean) context.getData(HTTP_REST_PROXY_SYNC_PROXY_ENABLE).orElse(false);
if (isReactive || isStreamResponseType || !syncRestProxyEnabled) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
    this.httpPipeline = httpPipeline;
    this.interfaceParser = interfaceParser;
    // Both delegates share the same pipeline, serializer and parsed interface
    // metadata; the sync/async choice is made per invocation.
    this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
    this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
    // Look up the parser for this method on the pre-parsed swagger interface.
    return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
    // Hand the request to the pipeline; policies and the HTTP client execute from here.
    return httpPipeline.send(request, contextData);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
    // Convenience overload: uses the default pipeline and default serializer.
    return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
    // Convenience overload: supplies the default serializer for the given pipeline.
    return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
    // Parse the swagger interface once up front; the parser is reused for every call.
    final SwaggerInterfaceParser parser = new SwaggerInterfaceParser(swaggerInterface, serializer);
    final RestProxy handler = new RestProxy(httpPipeline, serializer, parser);
    // The unchecked cast is safe: the dynamic proxy implements exactly this interface.
    return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(),
        new Class<?>[]{swaggerInterface}, handler);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
private static final String HTTP_REST_PROXY_SYNC_PROXY_ENABLE = "com.azure.core.http.restproxy.syncproxy.enable";
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
    this.interfaceParser = interfaceParser;
    // Async and sync delegates share the same pipeline, serializer and interface metadata.
    this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
    this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
    this.httpPipeline = httpPipeline;
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
    // Delegate to the interface parser built when this proxy was created.
    return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
    // Forward the request through the configured pipeline.
    return httpPipeline.send(request, contextData);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
    // Convenience overload: default pipeline plus default serializer.
    return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
    // Convenience overload: default serializer with a caller-provided pipeline.
    return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
    // Parse the interface once; the resulting parser is cached on the RestProxy.
    final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
    final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
    // Cast is safe: the dynamic proxy implements exactly the requested interface.
    return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
        restProxy);
}
} |
This will make typo and deprecation permanent. | public void customize(LibraryCustomization customization, Logger logger) {
PackageCustomization implementationModels = customization.getPackage("com.azure.storage.blob.implementation.models");
implementationModels.getClass("BlobHierarchyListSegment").addAnnotation("@JsonDeserialize(using = com.azure.storage.blob.implementation.util.CustomHierarchicalListingDeserializer.class)");
implementationModels.getClass("BlobPrefix").rename("BlobPrefixInternal");
PackageCustomization models = customization.getPackage("com.azure.storage.blob.models");
models.getClass("PageList").addAnnotation("@JsonDeserialize(using = PageListDeserializer.class)");
models.getClass("PageList").getMethod("getNextMarker").setModifier(Modifier.PRIVATE);
models.getClass("PageList").getMethod("setNextMarker").setModifier(Modifier.PRIVATE);
String pageListFileName = "src/main/java/com/azure/storage/blob/models/PageList.java";
String fileContent = customization.getRawEditor().getFileContent(pageListFileName);
int startImportIndex = fileContent.indexOf("import com.azure.core.annotation.Fluent;") + 40;
int startStaticIndex = fileContent.indexOf("class PageList {") + 16;
String updatedFileContent = fileContent.substring(0, startImportIndex)
+ "import com.azure.storage.blob.implementation.models.PageListHelper;"
+ fileContent.substring(startImportIndex, startStaticIndex)
+ "static {\n"
+ " PageListHelper.setAccessor(new PageListHelper.PageListAccessor() {\n"
+ " @Override\n"
+ " public String getNextMarker(PageList pageList) {\n"
+ " return pageList.getNextMarker();\n"
+ " }\n"
+ "\n"
+ " @Override\n"
+ " public PageList setNextMarker(PageList pageList, String marker) {\n"
+ " return pageList.setNextMarker(marker);\n"
+ " }\n"
+ " });\n"
+ " } "
+ fileContent.substring(startStaticIndex);
customization.getRawEditor().removeFile(pageListFileName);
customization.getRawEditor().addFile(pageListFileName, updatedFileContent);
models.getClass("BlobCopySourceTags").rename("BlobCopySourceTagsMode");
ClassCustomization blobHttpHeaders = models.getClass("BlobHttpHeaders");
blobHttpHeaders.getMethod("getContentMd5").getJavadoc().setDescription("Get the contentMd5 property: " +
"Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for " +
"the individual blocks were validated when each was uploaded. The value does not need to be base64 " +
"encoded as the SDK will perform the encoding.");
blobHttpHeaders.getMethod("setContentMd5").getJavadoc().setDescription("Set the contentMd5 property: " +
"Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for " +
"the individual blocks were validated when each was uploaded. The value does not need to be base64 " +
"encoded as the SDK will perform the encoding.");
ClassCustomization blobContainerEncryptionScope = models.getClass("BlobContainerEncryptionScope");
blobContainerEncryptionScope.getMethod("isEncryptionScopeOverridePrevented")
.setReturnType("boolean", "return Boolean.TRUE.equals(%s);", true);
blobHttpHeaders.removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"blob-http-headers\")");
blobContainerEncryptionScope.removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"blob-container-encryption-scope\")");
models.getClass("CpkInfo").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"cpk-info\")");
models.getClass("BlobMetrics").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"Metrics\")");
models.getClass("BlobAnalyticsLogging").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"Logging\")");
models.getClass("BlobRetentionPolicy").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"RetentionPolicy\")");
models.getClass("BlobServiceStatistics").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"StorageServiceStats\")");
models.getClass("BlobSignedIdentifier").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"SignedIdentifier\")");
models.getClass("BlobAccessPolicy").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"AccessPolicy\")");
ClassCustomization blobContainerItemProperties = models.getClass("BlobContainerItemProperties");
blobContainerItemProperties.getMethod("isEncryptionScopeOverridePrevented")
.setReturnType("boolean", "return Boolean.TRUE.equals(%s);", true);
blobContainerItemProperties.getMethod("setIsImmutableStorageWithVersioningEnabled")
.rename("setImmutableStorageWithVersioningEnabled");
blobContainerItemProperties.getMethod("setEncryptionScopeOverridePrevented")
.replaceParameters("boolean encryptionScopeOverridePrevented");
ClassCustomization block = models.getClass("Block");
block.getMethod("getSizeInt")
.rename("getSize")
.addAnnotation("@Deprecated")
.setReturnType("int", "return (int) this.sizeLong;
.getJavadoc()
.setDeprecated("Use {@link
block.getMethod("setSizeInt")
.rename("setSize")
.addAnnotation("@Deprecated")
.setReturnType("Block", "return %s.setSizeLong((long) sizeInt);", true)
.getJavadoc()
.setDeprecated("Use {@link
ClassCustomization listBlobsIncludeItem = models.getClass("ListBlobsIncludeItem");
listBlobsIncludeItem.renameEnumMember("IMMUTABILITYPOLICY", "IMMUTABILITY_POLICY")
.renameEnumMember("LEGALHOLD", "LEGAL_HOLD")
.renameEnumMember("DELETEDWITHVERSIONS", "DELETED_WITH_VERSIONS");
String blobErrorCodeFile = "src/main/java/com/azure/storage/blob/models/BlobErrorCode.java";
String blobErrorCodeFileContent = customization.getRawEditor().getFileContent(blobErrorCodeFile);
blobErrorCodeFileContent = blobErrorCodeFileContent.replaceAll("SnaphotOperationRateExceeded", "SnapshotOperationRateExceeded");
customization.getRawEditor().replaceFile(blobErrorCodeFile, blobErrorCodeFileContent);
ClassCustomization blobErrorCode = models.getClass("BlobErrorCode");
blobErrorCode.getConstant("SNAPHOT_OPERATION_RATE_EXCEEDED")
.addAnnotation("@Deprecated")
.getJavadoc()
.setDeprecated("Please use {@link BlobErrorCode
}
} | .setDeprecated("Please use {@link BlobErrorCode | public void customize(LibraryCustomization customization, Logger logger) {
PackageCustomization implementationModels = customization.getPackage("com.azure.storage.blob.implementation.models");
implementationModels.getClass("BlobHierarchyListSegment").addAnnotation("@JsonDeserialize(using = com.azure.storage.blob.implementation.util.CustomHierarchicalListingDeserializer.class)");
implementationModels.getClass("BlobPrefix").rename("BlobPrefixInternal");
PackageCustomization models = customization.getPackage("com.azure.storage.blob.models");
models.getClass("PageList").addAnnotation("@JsonDeserialize(using = PageListDeserializer.class)");
models.getClass("PageList").getMethod("getNextMarker").setModifier(Modifier.PRIVATE);
models.getClass("PageList").getMethod("setNextMarker").setModifier(Modifier.PRIVATE);
String pageListFileName = "src/main/java/com/azure/storage/blob/models/PageList.java";
String fileContent = customization.getRawEditor().getFileContent(pageListFileName);
int startImportIndex = fileContent.indexOf("import com.azure.core.annotation.Fluent;") + 40;
int startStaticIndex = fileContent.indexOf("class PageList {") + 16;
String updatedFileContent = fileContent.substring(0, startImportIndex)
+ "import com.azure.storage.blob.implementation.models.PageListHelper;"
+ fileContent.substring(startImportIndex, startStaticIndex)
+ "static {\n"
+ " PageListHelper.setAccessor(new PageListHelper.PageListAccessor() {\n"
+ " @Override\n"
+ " public String getNextMarker(PageList pageList) {\n"
+ " return pageList.getNextMarker();\n"
+ " }\n"
+ "\n"
+ " @Override\n"
+ " public PageList setNextMarker(PageList pageList, String marker) {\n"
+ " return pageList.setNextMarker(marker);\n"
+ " }\n"
+ " });\n"
+ " } "
+ fileContent.substring(startStaticIndex);
customization.getRawEditor().removeFile(pageListFileName);
customization.getRawEditor().addFile(pageListFileName, updatedFileContent);
models.getClass("BlobCopySourceTags").rename("BlobCopySourceTagsMode");
ClassCustomization blobHttpHeaders = models.getClass("BlobHttpHeaders");
blobHttpHeaders.getMethod("getContentMd5").getJavadoc().setDescription("Get the contentMd5 property: " +
"Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for " +
"the individual blocks were validated when each was uploaded. The value does not need to be base64 " +
"encoded as the SDK will perform the encoding.");
blobHttpHeaders.getMethod("setContentMd5").getJavadoc().setDescription("Set the contentMd5 property: " +
"Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for " +
"the individual blocks were validated when each was uploaded. The value does not need to be base64 " +
"encoded as the SDK will perform the encoding.");
ClassCustomization blobContainerEncryptionScope = models.getClass("BlobContainerEncryptionScope");
blobContainerEncryptionScope.getMethod("isEncryptionScopeOverridePrevented")
.setReturnType("boolean", "return Boolean.TRUE.equals(%s);", true);
blobHttpHeaders.removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"blob-http-headers\")");
blobContainerEncryptionScope.removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"blob-container-encryption-scope\")");
models.getClass("CpkInfo").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"cpk-info\")");
models.getClass("BlobMetrics").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"Metrics\")");
models.getClass("BlobAnalyticsLogging").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"Logging\")");
models.getClass("BlobRetentionPolicy").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"RetentionPolicy\")");
models.getClass("BlobServiceStatistics").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"StorageServiceStats\")");
models.getClass("BlobSignedIdentifier").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"SignedIdentifier\")");
models.getClass("BlobAccessPolicy").removeAnnotation("@JacksonXmlRootElement")
.addAnnotation("@JacksonXmlRootElement(localName = \"AccessPolicy\")");
ClassCustomization blobContainerItemProperties = models.getClass("BlobContainerItemProperties");
blobContainerItemProperties.getMethod("isEncryptionScopeOverridePrevented")
.setReturnType("boolean", "return Boolean.TRUE.equals(%s);", true);
blobContainerItemProperties.getMethod("setIsImmutableStorageWithVersioningEnabled")
.rename("setImmutableStorageWithVersioningEnabled");
blobContainerItemProperties.getMethod("setEncryptionScopeOverridePrevented")
.replaceParameters("boolean encryptionScopeOverridePrevented");
ClassCustomization block = models.getClass("Block");
block.getMethod("getSizeInt")
.rename("getSize")
.addAnnotation("@Deprecated")
.setReturnType("int", "return (int) this.sizeLong;
.getJavadoc()
.setDeprecated("Use {@link
block.getMethod("setSizeInt")
.rename("setSize")
.addAnnotation("@Deprecated")
.setReturnType("Block", "return %s.setSizeLong((long) sizeInt);", true)
.getJavadoc()
.setDeprecated("Use {@link
ClassCustomization listBlobsIncludeItem = models.getClass("ListBlobsIncludeItem");
listBlobsIncludeItem.renameEnumMember("IMMUTABILITYPOLICY", "IMMUTABILITY_POLICY")
.renameEnumMember("LEGALHOLD", "LEGAL_HOLD")
.renameEnumMember("DELETEDWITHVERSIONS", "DELETED_WITH_VERSIONS");
String blobErrorCodeFile = "src/main/java/com/azure/storage/blob/models/BlobErrorCode.java";
String blobErrorCodeFileContent = customization.getRawEditor().getFileContent(blobErrorCodeFile);
blobErrorCodeFileContent = blobErrorCodeFileContent.replaceAll("SnaphotOperationRateExceeded", "SnapshotOperationRateExceeded");
customization.getRawEditor().replaceFile(blobErrorCodeFile, blobErrorCodeFileContent);
ClassCustomization blobErrorCode = models.getClass("BlobErrorCode");
blobErrorCode.getConstant("SNAPHOT_OPERATION_RATE_EXCEEDED")
.addAnnotation("@Deprecated")
.getJavadoc()
.setDeprecated("Please use {@link BlobErrorCode
}
} | class BlobStorageCustomization extends Customization {
@Override | class BlobStorageCustomization extends Customization {
@Override |
Do we need this warning log? I am concerned that customers might see this log and blame for perf issues, while at the same time not sure on how much control they have got on this? | private Mac createNewMac() {
if (!this.isMacInstanceCloneable.get()) {
return this.macProvider.get();
}
try {
return (Mac) this.macInstance.clone();
} catch (CloneNotSupportedException e) {
LOGGER.warn(
"Cloning for the {} algorithm with provider {} ({}) not possible - this will " +
"result in less than ideal performance.",
this.macInstance.getAlgorithm(),
this.macInstance.getProvider().toString(),
this.macInstance.getProvider().getInfo());
this.isMacInstanceCloneable.set(false);
return this.macProvider.get();
}
} | LOGGER.warn( | private Mac createNewMac() {
if (!this.isMacInstanceCloneable.get()) {
return this.macProvider.get();
}
try {
return (Mac) this.macInstance.clone();
} catch (CloneNotSupportedException e) {
LOGGER.warn(
"Cloning for the {} algorithm with provider {} ({}) not possible - this will " +
"result in less than ideal performance.",
this.macInstance.getAlgorithm(),
this.macInstance.getProvider().toString(),
this.macInstance.getProvider().getInfo());
this.isMacInstanceCloneable.set(false);
return this.macProvider.get();
}
} | class MacPool {
private static final Logger LOGGER = LoggerFactory.getLogger(MacPool.class);
final Mac macInstance;
final ConcurrentLinkedQueue<Mac> pool;
final AtomicBoolean isMacInstanceCloneable = new AtomicBoolean(true);
final Supplier<Mac> macProvider;
public MacPool(Supplier<Mac> macProvider) {
if (macProvider == null) {
throw new IllegalArgumentException("macProvider");
}
this.macProvider = macProvider;
this.macInstance = macProvider.get();
this.pool = new ConcurrentLinkedQueue<>();
}
public ReUsableMac take() {
Mac cachedInstance = pool.poll();
if (cachedInstance == null) {
cachedInstance = this.createNewMac();
}
return new ReUsableMac(cachedInstance, this);
}
public void give(ReUsableMac closableMac) {
this.pool.add(closableMac.macInstance);
}
/*
* Closable contract forces to add the Throws Exception contract unnecessarily
*/
static class ReUsableMac {
final Mac macInstance;
final MacPool pool;
public ReUsableMac(Mac macInstance, MacPool pool) {
this.macInstance = macInstance;
this.pool = pool;
}
public Mac get() {
return this.macInstance;
}
public void close() {
pool.give(this);
}
}
} | class MacPool {
private static final Logger LOGGER = LoggerFactory.getLogger(MacPool.class);
final Mac macInstance;
final ConcurrentLinkedQueue<Mac> pool;
final AtomicBoolean isMacInstanceCloneable = new AtomicBoolean(true);
final Supplier<Mac> macProvider;
public MacPool(Supplier<Mac> macProvider) {
if (macProvider == null) {
throw new IllegalArgumentException("macProvider");
}
this.macProvider = macProvider;
this.macInstance = macProvider.get();
this.pool = new ConcurrentLinkedQueue<>();
}
public ReUsableMac take() {
Mac cachedInstance = pool.poll();
if (cachedInstance == null) {
cachedInstance = this.createNewMac();
}
return new ReUsableMac(cachedInstance, this);
}
public void give(ReUsableMac closableMac) {
this.pool.add(closableMac.macInstance);
}
/*
* Closable contract forces to add the Throws Exception contract unnecessarily
*/
static class ReUsableMac {
final Mac macInstance;
final MacPool pool;
public ReUsableMac(Mac macInstance, MacPool pool) {
this.macInstance = macInstance;
this.pool = pool;
}
public Mac get() {
return this.macInstance;
}
public void close() {
pool.give(this);
}
}
} |
If a MacSpi does not support cloning - it is due to some special configuration of Security Providers or an uncommon JVM - a single customer has reported issues with the previous behavior - completely failing in that case. So, I actually think the warning is useful - because this is a trade-off between not using common security providers for some reason and a (relatively small) perf hit | private Mac createNewMac() {
if (!this.isMacInstanceCloneable.get()) {
return this.macProvider.get();
}
try {
return (Mac) this.macInstance.clone();
} catch (CloneNotSupportedException e) {
LOGGER.warn(
"Cloning for the {} algorithm with provider {} ({}) not possible - this will " +
"result in less than ideal performance.",
this.macInstance.getAlgorithm(),
this.macInstance.getProvider().toString(),
this.macInstance.getProvider().getInfo());
this.isMacInstanceCloneable.set(false);
return this.macProvider.get();
}
} | LOGGER.warn( | private Mac createNewMac() {
if (!this.isMacInstanceCloneable.get()) {
return this.macProvider.get();
}
try {
return (Mac) this.macInstance.clone();
} catch (CloneNotSupportedException e) {
LOGGER.warn(
"Cloning for the {} algorithm with provider {} ({}) not possible - this will " +
"result in less than ideal performance.",
this.macInstance.getAlgorithm(),
this.macInstance.getProvider().toString(),
this.macInstance.getProvider().getInfo());
this.isMacInstanceCloneable.set(false);
return this.macProvider.get();
}
} | class MacPool {
private static final Logger LOGGER = LoggerFactory.getLogger(MacPool.class);
final Mac macInstance;
final ConcurrentLinkedQueue<Mac> pool;
final AtomicBoolean isMacInstanceCloneable = new AtomicBoolean(true);
final Supplier<Mac> macProvider;
public MacPool(Supplier<Mac> macProvider) {
if (macProvider == null) {
throw new IllegalArgumentException("macProvider");
}
this.macProvider = macProvider;
this.macInstance = macProvider.get();
this.pool = new ConcurrentLinkedQueue<>();
}
public ReUsableMac take() {
Mac cachedInstance = pool.poll();
if (cachedInstance == null) {
cachedInstance = this.createNewMac();
}
return new ReUsableMac(cachedInstance, this);
}
public void give(ReUsableMac closableMac) {
this.pool.add(closableMac.macInstance);
}
/*
* Closable contract forces to add the Throws Exception contract unnecessarily
*/
static class ReUsableMac {
final Mac macInstance;
final MacPool pool;
public ReUsableMac(Mac macInstance, MacPool pool) {
this.macInstance = macInstance;
this.pool = pool;
}
public Mac get() {
return this.macInstance;
}
public void close() {
pool.give(this);
}
}
} | class MacPool {
private static final Logger LOGGER = LoggerFactory.getLogger(MacPool.class);
final Mac macInstance;
final ConcurrentLinkedQueue<Mac> pool;
final AtomicBoolean isMacInstanceCloneable = new AtomicBoolean(true);
final Supplier<Mac> macProvider;
public MacPool(Supplier<Mac> macProvider) {
if (macProvider == null) {
throw new IllegalArgumentException("macProvider");
}
this.macProvider = macProvider;
this.macInstance = macProvider.get();
this.pool = new ConcurrentLinkedQueue<>();
}
public ReUsableMac take() {
Mac cachedInstance = pool.poll();
if (cachedInstance == null) {
cachedInstance = this.createNewMac();
}
return new ReUsableMac(cachedInstance, this);
}
public void give(ReUsableMac closableMac) {
this.pool.add(closableMac.macInstance);
}
/*
* Closable contract forces to add the Throws Exception contract unnecessarily
*/
static class ReUsableMac {
final Mac macInstance;
final MacPool pool;
public ReUsableMac(Mac macInstance, MacPool pool) {
this.macInstance = macInstance;
this.pool = pool;
}
public Mac get() {
return this.macInstance;
}
public void close() {
pool.give(this);
}
}
} |
we should call to httpResponse.close() here. | public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
throw LOGGER.logExceptionAsError(
new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
HttpPipelineNextSyncPolicy nextPolicy = next.clone();
authorizeRequestSync(context);
HttpResponse httpResponse = next.processSync();
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
if (authorizeRequestOnChallengeSync(context, httpResponse)) {
return nextPolicy.processSync();
} else {
return httpResponse;
}
}
return httpResponse;
} | public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
throw LOGGER.logExceptionAsError(
new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
HttpPipelineNextSyncPolicy nextPolicy = next.clone();
authorizeRequestSync(context);
HttpResponse httpResponse = next.processSync();
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
if (authorizeRequestOnChallengeSync(context, httpResponse)) {
httpResponse.close();
return nextPolicy.processSync();
} else {
return httpResponse;
}
}
return httpResponse;
} | class ContainerRegistryCredentialsPolicy extends BearerTokenAuthenticationPolicy {
private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
private static final String BEARER = "Bearer";
public static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN =
Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+");
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
public static final String SCOPES_PARAMETER = "scope";
public static final String SERVICE_PARAMETER = "service";
public static final String AUTHORIZATION = "Authorization";
private final ContainerRegistryTokenService tokenService;
private final ClientLogger logger = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
/**
* Creates an instance of ContainerRegistryCredentialsPolicy.
*
* @param tokenService the token generation service.
*/
public ContainerRegistryCredentialsPolicy(ContainerRegistryTokenService tokenService) {
super(tokenService);
this.tokenService = tokenService;
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
return Mono.empty();
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request conext to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> setAuthorizationHeader(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
return tokenService.getToken(tokenRequestContext)
.flatMap((token) -> {
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
return Mono.empty();
});
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBody().map(buffer -> buffer.duplicate()));
}
HttpPipelineNextPolicy nextPolicy = next.clone();
return authorizeRequest(context)
.then(Mono.defer(() -> next.process()))
.flatMap(httpResponse -> {
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
return authorizeRequestOnChallenge(context, httpResponse).flatMap(retry -> {
if (retry) {
return nextPolicy.process()
.doFinally(ignored -> {
httpResponse.close();
});
} else {
return Mono.just(httpResponse);
}
});
}
return Mono.just(httpResponse);
});
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
return Mono.defer(() -> {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return Mono.just(false);
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
return setAuthorizationHeader(context, new ContainerRegistryTokenRequestContext(serviceName, scope))
.then(Mono.defer(() -> Mono.just(true)));
}
return Mono.just(false);
}
});
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public void authorizeRequestSync(HttpPipelineCallContext context) {
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public void setAuthorizationHeaderSync(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
AccessToken token = tokenService.getTokenSync(tokenRequestContext);
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
}
@Override
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return false;
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
setAuthorizationHeaderSync(context, new ContainerRegistryTokenRequestContext(serviceName, scope));
return true;
}
return false;
}
}
private Map<String, String> parseBearerChallenge(String header) {
if (header.startsWith(BEARER)) {
String challengeParams = header.substring(BEARER.length());
Matcher matcher2 = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams);
Map<String, String> challengeParameters = new HashMap<>();
while (matcher2.find()) {
challengeParameters.put(matcher2.group(1), matcher2.group(2));
}
return challengeParameters;
}
return null;
}
} | class ContainerRegistryCredentialsPolicy extends BearerTokenAuthenticationPolicy {
private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
private static final String BEARER = "Bearer";
public static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN =
Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+");
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
public static final String SCOPES_PARAMETER = "scope";
public static final String SERVICE_PARAMETER = "service";
public static final String AUTHORIZATION = "Authorization";
private final ContainerRegistryTokenService tokenService;
private final ClientLogger logger = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
/**
* Creates an instance of ContainerRegistryCredentialsPolicy.
*
* @param tokenService the token generation service.
*/
public ContainerRegistryCredentialsPolicy(ContainerRegistryTokenService tokenService) {
super(tokenService);
this.tokenService = tokenService;
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
return Mono.empty();
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request conext to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> setAuthorizationHeader(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
return tokenService.getToken(tokenRequestContext)
.flatMap((token) -> {
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
return Mono.empty();
});
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBody().map(buffer -> buffer.duplicate()));
}
HttpPipelineNextPolicy nextPolicy = next.clone();
return authorizeRequest(context)
.then(Mono.defer(() -> next.process()))
.flatMap(httpResponse -> {
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
return authorizeRequestOnChallenge(context, httpResponse).flatMap(retry -> {
if (retry) {
return nextPolicy.process()
.doFinally(ignored -> {
httpResponse.close();
});
} else {
return Mono.just(httpResponse);
}
});
}
return Mono.just(httpResponse);
});
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
return Mono.defer(() -> {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return Mono.just(false);
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
return setAuthorizationHeader(context, new ContainerRegistryTokenRequestContext(serviceName, scope))
.then(Mono.defer(() -> Mono.just(true)));
}
return Mono.just(false);
}
});
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public void authorizeRequestSync(HttpPipelineCallContext context) {
}
/**
 * Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}.
 * If no token could be acquired — {@code getTokenSync} returns {@code null} when the async
 * {@code getToken} yields an empty {@link Mono}, e.g. for a context that is not a
 * {@code ContainerRegistryTokenRequestContext} — the request is left unmodified, mirroring the async
 * path where an empty Mono simply skips setting the header instead of throwing.
 *
 * @param context the HTTP pipeline context.
 * @param tokenRequestContext the token request context to be used for token acquisition.
 */
@Override
public void setAuthorizationHeaderSync(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
    AccessToken token = tokenService.getTokenSync(tokenRequestContext);
    if (token != null) {
        context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
    }
}
/**
 * Handles the authentication challenge in the event a 401 response with a WWW-Authenticate
 * authentication challenge header is received after the initial request, re-authorizing the
 * request with an ACR token scoped to the challenge's "scope"/"service" parameters.
 *
 * @param context The request context.
 * @param response The Http Response containing the authentication challenge header.
 * @return {@code true} if the request was re-authorized and should be retried, {@code false} otherwise.
 */
@Override
public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
    String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
    if (!(response.getStatusCode() == 401 && authHeader != null)) {
        return false;
    }
    // Pull "scope" and "service" out of the Bearer challenge and exchange them for an ACR token.
    Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
    if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
        String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
        String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
        setAuthorizationHeaderSync(context, new ContainerRegistryTokenRequestContext(serviceName, scope));
        return true;
    }
    return false;
}
/**
 * Parses a "Bearer ..." challenge header into its key/value challenge parameters.
 *
 * @param header the raw WWW-Authenticate header value.
 * @return the challenge parameters, or {@code null} when the header is not a Bearer challenge.
 */
private Map<String, String> parseBearerChallenge(String header) {
    if (!header.startsWith(BEARER)) {
        return null;
    }
    Map<String, String> parameters = new HashMap<>();
    Matcher paramMatcher = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(header.substring(BEARER.length()));
    while (paramMatcher.find()) {
        parameters.put(paramMatcher.group(1), paramMatcher.group(2));
    }
    return parameters;
}
} | |
we should have todo/work item to revisit this later. this one seems to be simpler than identity. | public AccessToken getTokenSync(TokenRequestContext tokenRequestContext) {
return this.getToken(tokenRequestContext).block();
} | return this.getToken(tokenRequestContext).block(); | public AccessToken getTokenSync(TokenRequestContext tokenRequestContext) {
return this.getToken(tokenRequestContext).block();
} | class ContainerRegistryTokenService implements TokenCredential {
// Cache of the ACR refresh token; stays null when no AAD credential was supplied (anonymous access).
private AccessTokenCacheImpl refreshTokenCache;
// Low-level client used to exchange credentials/refresh tokens for ACR access tokens.
private TokenServiceImpl tokenService;
// True when the service was built without an AAD credential; getToken then skips the refresh-token exchange.
private boolean isAnonymousAccess;
private final ClientLogger logger = new ClientLogger(ContainerRegistryTokenService.class);
/**
 * Creates an instance of ContainerRegistryTokenService.
 *
 * @param aadTokenCredential the credential to be used to acquire the token; may be {@code null},
 * in which case the service operates with anonymous access.
 * @param audience the audience passed to the refresh-token credential used in the AAD-to-ACR exchange.
 * @param url the container registry endpoint.
 * @param serviceVersion the service api version being targeted by the client.
 * @param pipeline the pipeline to be used for the rest calls to the service.
 * @param serializerAdapter the serializer adapter to be used for the rest calls to the service.
 */
public ContainerRegistryTokenService(TokenCredential aadTokenCredential, ContainerRegistryAudience audience,
                                     String url, ContainerRegistryServiceVersion serviceVersion,
                                     HttpPipeline pipeline, SerializerAdapter serializerAdapter) {
    this.tokenService = new TokenServiceImpl(url, serviceVersion, pipeline, serializerAdapter);
    if (aadTokenCredential != null) {
        this.refreshTokenCache = new AccessTokenCacheImpl(
            new ContainerRegistryRefreshTokenCredential(tokenService, aadTokenCredential, audience));
    } else {
        // No AAD credential: fall back to anonymous access (see getToken).
        isAnonymousAccess = true;
    }
}
// Package-private fluent setters. NOTE(review): these look like test hooks for substituting the
// token service, refresh-token cache and access mode — confirm against the test suite.
ContainerRegistryTokenService setTokenService(TokenServiceImpl tokenServiceImpl) {
    this.tokenService = tokenServiceImpl;
    return this;
}

ContainerRegistryTokenService setRefreshTokenCache(AccessTokenCacheImpl tokenCache) {
    this.refreshTokenCache = tokenCache;
    return this;
}

ContainerRegistryTokenService setAnonymousAccess(boolean isAnonymousAccess) {
    this.isAnonymousAccess = isAnonymousAccess;
    return this;
}
/**
 * Gets an ACR access token for the given token request context.
 *
 * @param tokenRequestContext the token request context to be used to get the token. Must be a
 * {@link ContainerRegistryTokenRequestContext}; any other type yields an empty Mono.
 * @return a {@link Mono} emitting the {@link AccessToken}, or empty when the context type is unsupported.
 */
@Override
public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) {
    if (!(tokenRequestContext instanceof ContainerRegistryTokenRequestContext)) {
        logger.info("tokenRequestContext is not of the type ContainerRegistryTokenRequestContext");
        return Mono.empty();
    }
    ContainerRegistryTokenRequestContext requestContext =
        (ContainerRegistryTokenRequestContext) tokenRequestContext;
    String scope = requestContext.getScope();
    String serviceName = requestContext.getServiceName();
    return Mono.defer(() -> {
        if (this.isAnonymousAccess) {
            // Anonymous access: no refresh token is available, request the ACR access token directly.
            return this.tokenService.getAcrAccessTokenAsync(null, scope, serviceName, TokenGrantType.PASSWORD);
        }
        // Authenticated access: exchange the cached ACR refresh token for an ACR access token.
        return this.refreshTokenCache.getToken(requestContext)
            .flatMap(refreshToken -> this.tokenService.getAcrAccessTokenAsync(refreshToken.getToken(), scope,
                serviceName, TokenGrantType.REFRESH_TOKEN));
    }).doOnError(err -> logger.error("Could not fetch the ACR access token.", err));
}
} | class ContainerRegistryTokenService implements TokenCredential {
private AccessTokenCacheImpl refreshTokenCache;
private TokenServiceImpl tokenService;
private boolean isAnonymousAccess;
private final ClientLogger logger = new ClientLogger(ContainerRegistryTokenService.class);
/**
* Creates an instance of AccessTokenCache with default scheme "Bearer".
*
* @param aadTokenCredential the credential to be used to acquire the token.
* @param url the container registry endpoint.
* @param serviceVersion the service api version being targeted by the client.
* @param pipeline the pipeline to be used for the rest calls to the service.
* @param serializerAdapter the serializer adapter to be used for the rest calls to the service.
*/
public ContainerRegistryTokenService(TokenCredential aadTokenCredential, ContainerRegistryAudience audience,
String url, ContainerRegistryServiceVersion serviceVersion,
HttpPipeline pipeline, SerializerAdapter serializerAdapter) {
this.tokenService = new TokenServiceImpl(url, serviceVersion, pipeline, serializerAdapter);
if (aadTokenCredential != null) {
this.refreshTokenCache = new AccessTokenCacheImpl(
new ContainerRegistryRefreshTokenCredential(tokenService, aadTokenCredential, audience));
} else {
isAnonymousAccess = true;
}
}
ContainerRegistryTokenService setTokenService(TokenServiceImpl tokenServiceImpl) {
this.tokenService = tokenServiceImpl;
return this;
}
ContainerRegistryTokenService setRefreshTokenCache(AccessTokenCacheImpl tokenCache) {
this.refreshTokenCache = tokenCache;
return this;
}
ContainerRegistryTokenService setAnonymousAccess(boolean isAnonymousAccess) {
this.isAnonymousAccess = isAnonymousAccess;
return this;
}
/**
* Gets a token against the token request context.
*
* @param tokenRequestContext the token request context to be used to get the token.
*/
@Override
public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) {
if (!(tokenRequestContext instanceof ContainerRegistryTokenRequestContext)) {
logger.info("tokenRequestContext is not of the type ContainerRegistryTokenRequestContext");
return Mono.empty();
}
ContainerRegistryTokenRequestContext requestContext =
(ContainerRegistryTokenRequestContext) tokenRequestContext;
String scope = requestContext.getScope();
String serviceName = requestContext.getServiceName();
return Mono.defer(() -> {
if (this.isAnonymousAccess) {
return this.tokenService.getAcrAccessTokenAsync(null, scope, serviceName, TokenGrantType.PASSWORD);
}
return this.refreshTokenCache.getToken(requestContext)
.flatMap(refreshToken -> this.tokenService.getAcrAccessTokenAsync(refreshToken.getToken(), scope,
serviceName, TokenGrantType.REFRESH_TOKEN));
}).doOnError(err -> logger.error("Could not fetch the ACR error token.", err));
}
} |
https://github.com/Azure/azure-sdk-for-java/issues/29746 | public AccessToken getTokenSync(TokenRequestContext tokenRequestContext) {
return this.getToken(tokenRequestContext).block();
} | return this.getToken(tokenRequestContext).block(); | public AccessToken getTokenSync(TokenRequestContext tokenRequestContext) {
return this.getToken(tokenRequestContext).block();
} | class ContainerRegistryTokenService implements TokenCredential {
private AccessTokenCacheImpl refreshTokenCache;
private TokenServiceImpl tokenService;
private boolean isAnonymousAccess;
private final ClientLogger logger = new ClientLogger(ContainerRegistryTokenService.class);
/**
* Creates an instance of AccessTokenCache with default scheme "Bearer".
*
* @param aadTokenCredential the credential to be used to acquire the token.
* @param url the container registry endpoint.
* @param serviceVersion the service api version being targeted by the client.
* @param pipeline the pipeline to be used for the rest calls to the service.
* @param serializerAdapter the serializer adapter to be used for the rest calls to the service.
*/
public ContainerRegistryTokenService(TokenCredential aadTokenCredential, ContainerRegistryAudience audience,
String url, ContainerRegistryServiceVersion serviceVersion,
HttpPipeline pipeline, SerializerAdapter serializerAdapter) {
this.tokenService = new TokenServiceImpl(url, serviceVersion, pipeline, serializerAdapter);
if (aadTokenCredential != null) {
this.refreshTokenCache = new AccessTokenCacheImpl(
new ContainerRegistryRefreshTokenCredential(tokenService, aadTokenCredential, audience));
} else {
isAnonymousAccess = true;
}
}
ContainerRegistryTokenService setTokenService(TokenServiceImpl tokenServiceImpl) {
this.tokenService = tokenServiceImpl;
return this;
}
ContainerRegistryTokenService setRefreshTokenCache(AccessTokenCacheImpl tokenCache) {
this.refreshTokenCache = tokenCache;
return this;
}
ContainerRegistryTokenService setAnonymousAccess(boolean isAnonymousAccess) {
this.isAnonymousAccess = isAnonymousAccess;
return this;
}
/**
* Gets a token against the token request context.
*
* @param tokenRequestContext the token request context to be used to get the token.
*/
@Override
public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) {
if (!(tokenRequestContext instanceof ContainerRegistryTokenRequestContext)) {
logger.info("tokenRequestContext is not of the type ContainerRegistryTokenRequestContext");
return Mono.empty();
}
ContainerRegistryTokenRequestContext requestContext =
(ContainerRegistryTokenRequestContext) tokenRequestContext;
String scope = requestContext.getScope();
String serviceName = requestContext.getServiceName();
return Mono.defer(() -> {
if (this.isAnonymousAccess) {
return this.tokenService.getAcrAccessTokenAsync(null, scope, serviceName, TokenGrantType.PASSWORD);
}
return this.refreshTokenCache.getToken(requestContext)
.flatMap(refreshToken -> this.tokenService.getAcrAccessTokenAsync(refreshToken.getToken(), scope,
serviceName, TokenGrantType.REFRESH_TOKEN));
}).doOnError(err -> logger.error("Could not fetch the ACR error token.", err));
}
} | class ContainerRegistryTokenService implements TokenCredential {
private AccessTokenCacheImpl refreshTokenCache;
private TokenServiceImpl tokenService;
private boolean isAnonymousAccess;
private final ClientLogger logger = new ClientLogger(ContainerRegistryTokenService.class);
/**
* Creates an instance of AccessTokenCache with default scheme "Bearer".
*
* @param aadTokenCredential the credential to be used to acquire the token.
* @param url the container registry endpoint.
* @param serviceVersion the service api version being targeted by the client.
* @param pipeline the pipeline to be used for the rest calls to the service.
* @param serializerAdapter the serializer adapter to be used for the rest calls to the service.
*/
public ContainerRegistryTokenService(TokenCredential aadTokenCredential, ContainerRegistryAudience audience,
String url, ContainerRegistryServiceVersion serviceVersion,
HttpPipeline pipeline, SerializerAdapter serializerAdapter) {
this.tokenService = new TokenServiceImpl(url, serviceVersion, pipeline, serializerAdapter);
if (aadTokenCredential != null) {
this.refreshTokenCache = new AccessTokenCacheImpl(
new ContainerRegistryRefreshTokenCredential(tokenService, aadTokenCredential, audience));
} else {
isAnonymousAccess = true;
}
}
ContainerRegistryTokenService setTokenService(TokenServiceImpl tokenServiceImpl) {
this.tokenService = tokenServiceImpl;
return this;
}
ContainerRegistryTokenService setRefreshTokenCache(AccessTokenCacheImpl tokenCache) {
this.refreshTokenCache = tokenCache;
return this;
}
ContainerRegistryTokenService setAnonymousAccess(boolean isAnonymousAccess) {
this.isAnonymousAccess = isAnonymousAccess;
return this;
}
/**
* Gets a token against the token request context.
*
* @param tokenRequestContext the token request context to be used to get the token.
*/
@Override
public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) {
if (!(tokenRequestContext instanceof ContainerRegistryTokenRequestContext)) {
logger.info("tokenRequestContext is not of the type ContainerRegistryTokenRequestContext");
return Mono.empty();
}
ContainerRegistryTokenRequestContext requestContext =
(ContainerRegistryTokenRequestContext) tokenRequestContext;
String scope = requestContext.getScope();
String serviceName = requestContext.getServiceName();
return Mono.defer(() -> {
if (this.isAnonymousAccess) {
return this.tokenService.getAcrAccessTokenAsync(null, scope, serviceName, TokenGrantType.PASSWORD);
}
return this.refreshTokenCache.getToken(requestContext)
.flatMap(refreshToken -> this.tokenService.getAcrAccessTokenAsync(refreshToken.getToken(), scope,
serviceName, TokenGrantType.REFRESH_TOKEN));
}).doOnError(err -> logger.error("Could not fetch the ACR error token.", err));
}
} |
is it possible to extract common code from sync and async methods? | public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
throw LOGGER.logExceptionAsError(
new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
HttpPipelineNextSyncPolicy nextPolicy = next.clone();
authorizeRequestSync(context);
HttpResponse httpResponse = next.processSync();
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
if (authorizeRequestOnChallengeSync(context, httpResponse)) {
httpResponse.close();
return nextPolicy.processSync();
} else {
return httpResponse;
}
}
return httpResponse;
} | if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) { | public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
throw LOGGER.logExceptionAsError(
new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
HttpPipelineNextSyncPolicy nextPolicy = next.clone();
authorizeRequestSync(context);
HttpResponse httpResponse = next.processSync();
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
if (authorizeRequestOnChallengeSync(context, httpResponse)) {
httpResponse.close();
return nextPolicy.processSync();
} else {
return httpResponse;
}
}
return httpResponse;
} | class ContainerRegistryCredentialsPolicy extends BearerTokenAuthenticationPolicy {
private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
private static final String BEARER = "Bearer";
public static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN =
Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+");
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
public static final String SCOPES_PARAMETER = "scope";
public static final String SERVICE_PARAMETER = "service";
public static final String AUTHORIZATION = "Authorization";
private final ContainerRegistryTokenService tokenService;
private final ClientLogger logger = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
/**
* Creates an instance of ContainerRegistryCredentialsPolicy.
*
* @param tokenService the token generation service.
*/
public ContainerRegistryCredentialsPolicy(ContainerRegistryTokenService tokenService) {
super(tokenService);
this.tokenService = tokenService;
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
return Mono.empty();
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request conext to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> setAuthorizationHeader(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
return tokenService.getToken(tokenRequestContext)
.flatMap((token) -> {
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
return Mono.empty();
});
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBody().map(buffer -> buffer.duplicate()));
}
HttpPipelineNextPolicy nextPolicy = next.clone();
return authorizeRequest(context)
.then(Mono.defer(() -> next.process()))
.flatMap(httpResponse -> {
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
return authorizeRequestOnChallenge(context, httpResponse).flatMap(retry -> {
if (retry) {
return nextPolicy.process()
.doFinally(ignored -> {
httpResponse.close();
});
} else {
return Mono.just(httpResponse);
}
});
}
return Mono.just(httpResponse);
});
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
return Mono.defer(() -> {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return Mono.just(false);
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
return setAuthorizationHeader(context, new ContainerRegistryTokenRequestContext(serviceName, scope))
.then(Mono.defer(() -> Mono.just(true)));
}
return Mono.just(false);
}
});
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public void authorizeRequestSync(HttpPipelineCallContext context) {
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public void setAuthorizationHeaderSync(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
AccessToken token = tokenService.getTokenSync(tokenRequestContext);
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
}
/**
 * Handles the authentication challenge in the event a 401 response with a WWW-Authenticate
 * authentication challenge header is received after the initial request, re-authorizing the
 * request with an ACR token scoped to the challenge's "scope"/"service" parameters.
 *
 * @param context The request context.
 * @param response The Http Response containing the authentication challenge header.
 * @return {@code true} if the request was re-authorized and should be retried, {@code false} otherwise.
 */
@Override
public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
    String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
    if (!(response.getStatusCode() == 401 && authHeader != null)) {
        return false;
    }
    // Pull "scope" and "service" out of the Bearer challenge and exchange them for an ACR token.
    Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
    if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
        String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
        String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
        setAuthorizationHeaderSync(context, new ContainerRegistryTokenRequestContext(serviceName, scope));
        return true;
    }
    return false;
}
private Map<String, String> parseBearerChallenge(String header) {
if (header.startsWith(BEARER)) {
String challengeParams = header.substring(BEARER.length());
Matcher matcher2 = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams);
Map<String, String> challengeParameters = new HashMap<>();
while (matcher2.find()) {
challengeParameters.put(matcher2.group(1), matcher2.group(2));
}
return challengeParameters;
}
return null;
}
} | class ContainerRegistryCredentialsPolicy extends BearerTokenAuthenticationPolicy {
private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
private static final String BEARER = "Bearer";
public static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN =
Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+");
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
public static final String SCOPES_PARAMETER = "scope";
public static final String SERVICE_PARAMETER = "service";
public static final String AUTHORIZATION = "Authorization";
private final ContainerRegistryTokenService tokenService;
private final ClientLogger logger = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
/**
* Creates an instance of ContainerRegistryCredentialsPolicy.
*
* @param tokenService the token generation service.
*/
public ContainerRegistryCredentialsPolicy(ContainerRegistryTokenService tokenService) {
super(tokenService);
this.tokenService = tokenService;
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
return Mono.empty();
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request conext to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> setAuthorizationHeader(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
return tokenService.getToken(tokenRequestContext)
.flatMap((token) -> {
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
return Mono.empty();
});
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBody().map(buffer -> buffer.duplicate()));
}
HttpPipelineNextPolicy nextPolicy = next.clone();
return authorizeRequest(context)
.then(Mono.defer(() -> next.process()))
.flatMap(httpResponse -> {
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
return authorizeRequestOnChallenge(context, httpResponse).flatMap(retry -> {
if (retry) {
return nextPolicy.process()
.doFinally(ignored -> {
httpResponse.close();
});
} else {
return Mono.just(httpResponse);
}
});
}
return Mono.just(httpResponse);
});
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
return Mono.defer(() -> {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return Mono.just(false);
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
return setAuthorizationHeader(context, new ContainerRegistryTokenRequestContext(serviceName, scope))
.then(Mono.defer(() -> Mono.just(true)));
}
return Mono.just(false);
}
});
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public void authorizeRequestSync(HttpPipelineCallContext context) {
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public void setAuthorizationHeaderSync(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
AccessToken token = tokenService.getTokenSync(tokenRequestContext);
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
}
/**
 * Handles the authentication challenge in the event a 401 response with a WWW-Authenticate
 * authentication challenge header is received after the initial request, re-authorizing the
 * request with an ACR token scoped to the challenge's "scope"/"service" parameters.
 *
 * @param context The request context.
 * @param response The Http Response containing the authentication challenge header.
 * @return {@code true} if the request was re-authorized and should be retried, {@code false} otherwise.
 */
@Override
public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
    String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
    if (!(response.getStatusCode() == 401 && authHeader != null)) {
        return false;
    }
    // Pull "scope" and "service" out of the Bearer challenge and exchange them for an ACR token.
    Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
    if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
        String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
        String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
        setAuthorizationHeaderSync(context, new ContainerRegistryTokenRequestContext(serviceName, scope));
        return true;
    }
    return false;
}
private Map<String, String> parseBearerChallenge(String header) {
if (header.startsWith(BEARER)) {
String challengeParams = header.substring(BEARER.length());
Matcher matcher2 = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams);
Map<String, String> challengeParameters = new HashMap<>();
while (matcher2.find()) {
challengeParameters.put(matcher2.group(1), matcher2.group(2));
}
return challengeParameters;
}
return null;
}
} |
It doesn't seem very easily possible here, we have done that in [other policies](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/PortPolicy.java#L29) | public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
throw LOGGER.logExceptionAsError(
new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
HttpPipelineNextSyncPolicy nextPolicy = next.clone();
authorizeRequestSync(context);
HttpResponse httpResponse = next.processSync();
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
if (authorizeRequestOnChallengeSync(context, httpResponse)) {
httpResponse.close();
return nextPolicy.processSync();
} else {
return httpResponse;
}
}
return httpResponse;
} | if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) { | public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
throw LOGGER.logExceptionAsError(
new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
HttpPipelineNextSyncPolicy nextPolicy = next.clone();
authorizeRequestSync(context);
HttpResponse httpResponse = next.processSync();
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
if (authorizeRequestOnChallengeSync(context, httpResponse)) {
httpResponse.close();
return nextPolicy.processSync();
} else {
return httpResponse;
}
}
return httpResponse;
} | class ContainerRegistryCredentialsPolicy extends BearerTokenAuthenticationPolicy {
private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
private static final String BEARER = "Bearer";
public static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN =
Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+");
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
public static final String SCOPES_PARAMETER = "scope";
public static final String SERVICE_PARAMETER = "service";
public static final String AUTHORIZATION = "Authorization";
private final ContainerRegistryTokenService tokenService;
private final ClientLogger logger = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
/**
* Creates an instance of ContainerRegistryCredentialsPolicy.
*
* @param tokenService the token generation service.
*/
public ContainerRegistryCredentialsPolicy(ContainerRegistryTokenService tokenService) {
super(tokenService);
this.tokenService = tokenService;
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
return Mono.empty();
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request conext to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> setAuthorizationHeader(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
return tokenService.getToken(tokenRequestContext)
.flatMap((token) -> {
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
return Mono.empty();
});
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBody().map(buffer -> buffer.duplicate()));
}
HttpPipelineNextPolicy nextPolicy = next.clone();
return authorizeRequest(context)
.then(Mono.defer(() -> next.process()))
.flatMap(httpResponse -> {
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
return authorizeRequestOnChallenge(context, httpResponse).flatMap(retry -> {
if (retry) {
return nextPolicy.process()
.doFinally(ignored -> {
httpResponse.close();
});
} else {
return Mono.just(httpResponse);
}
});
}
return Mono.just(httpResponse);
});
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
return Mono.defer(() -> {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return Mono.just(false);
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
return setAuthorizationHeader(context, new ContainerRegistryTokenRequestContext(serviceName, scope))
.then(Mono.defer(() -> Mono.just(true)));
}
return Mono.just(false);
}
});
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public void authorizeRequestSync(HttpPipelineCallContext context) {
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public void setAuthorizationHeaderSync(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
AccessToken token = tokenService.getTokenSync(tokenRequestContext);
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
}
@Override
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return false;
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
setAuthorizationHeaderSync(context, new ContainerRegistryTokenRequestContext(serviceName, scope));
return true;
}
return false;
}
}
private Map<String, String> parseBearerChallenge(String header) {
if (header.startsWith(BEARER)) {
String challengeParams = header.substring(BEARER.length());
Matcher matcher2 = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams);
Map<String, String> challengeParameters = new HashMap<>();
while (matcher2.find()) {
challengeParameters.put(matcher2.group(1), matcher2.group(2));
}
return challengeParameters;
}
return null;
}
} | class ContainerRegistryCredentialsPolicy extends BearerTokenAuthenticationPolicy {
private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
private static final String BEARER = "Bearer";
public static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN =
Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+");
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
public static final String SCOPES_PARAMETER = "scope";
public static final String SERVICE_PARAMETER = "service";
public static final String AUTHORIZATION = "Authorization";
private final ContainerRegistryTokenService tokenService;
private final ClientLogger logger = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
/**
* Creates an instance of ContainerRegistryCredentialsPolicy.
*
* @param tokenService the token generation service.
*/
public ContainerRegistryCredentialsPolicy(ContainerRegistryTokenService tokenService) {
super(tokenService);
this.tokenService = tokenService;
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
return Mono.empty();
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request conext to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> setAuthorizationHeader(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
return tokenService.getToken(tokenRequestContext)
.flatMap((token) -> {
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
return Mono.empty();
});
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBody().map(buffer -> buffer.duplicate()));
}
HttpPipelineNextPolicy nextPolicy = next.clone();
return authorizeRequest(context)
.then(Mono.defer(() -> next.process()))
.flatMap(httpResponse -> {
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
return authorizeRequestOnChallenge(context, httpResponse).flatMap(retry -> {
if (retry) {
return nextPolicy.process()
.doFinally(ignored -> {
httpResponse.close();
});
} else {
return Mono.just(httpResponse);
}
});
}
return Mono.just(httpResponse);
});
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
return Mono.defer(() -> {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return Mono.just(false);
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
return setAuthorizationHeader(context, new ContainerRegistryTokenRequestContext(serviceName, scope))
.then(Mono.defer(() -> Mono.just(true)));
}
return Mono.just(false);
}
});
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public void authorizeRequestSync(HttpPipelineCallContext context) {
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public void setAuthorizationHeaderSync(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
AccessToken token = tokenService.getTokenSync(tokenRequestContext);
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
}
@Override
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return false;
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
setAuthorizationHeaderSync(context, new ContainerRegistryTokenRequestContext(serviceName, scope));
return true;
}
return false;
}
}
private Map<String, String> parseBearerChallenge(String header) {
if (header.startsWith(BEARER)) {
String challengeParams = header.substring(BEARER.length());
Matcher matcher2 = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams);
Map<String, String> challengeParameters = new HashMap<>();
while (matcher2.find()) {
challengeParameters.put(matcher2.group(1), matcher2.group(2));
}
return challengeParameters;
}
return null;
}
} |
Is it possible to correlate a subscriber to its downstream instance? (ie. ReactorReceiver A subscribed to this and was assigned subscriberId.) | void onAdd() {
subscriberId = UUID.randomUUID().toString().substring(0, 6);
processor.logger.atVerbose()
.addKeyValue("subscriberId", subscriberId)
.log("Added subscriber.");
} | subscriberId = UUID.randomUUID().toString().substring(0, 6); | void onAdd() {
Object subscriberIdObj = actual.currentContext().getOrDefault(SUBSCRIBER_ID_KEY, null);
if (subscriberIdObj != null) {
subscriberId = subscriberIdObj.toString();
} else {
subscriberId = StringUtil.getRandomString("un");
}
processor.logger.atVerbose()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Added subscriber.");
} | class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private String subscriberId = null;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
@Override
public void cancel() {
processor.subscribers.remove(this);
super.cancel();
processor.logger.atVerbose()
.addKeyValue("subscriberId", subscriberId)
.log("Canceled subscriber");
}
@Override
public void onComplete() {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onComplete();
processor.logger.atInfo()
.addKeyValue("subscriberId", subscriberId)
.log("AMQP channel processor completed.");
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
processor.subscribers.remove(this);
super.complete(channel);
processor.logger.atInfo()
.addKeyValue("subscriberId", subscriberId)
.log("Next AMQP channel received.");
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo()
.addKeyValue("subscriberId", subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
}
} | class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private String subscriberId = null;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
@Override
public void cancel() {
processor.subscribers.remove(this);
super.cancel();
processor.logger.atVerbose()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Canceled subscriber");
}
@Override
public void onComplete() {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onComplete();
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("AMQP channel processor completed.");
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
processor.subscribers.remove(this);
super.complete(channel);
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Next AMQP channel received.");
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
}
} |
tried it in the last commit here: https://github.com/Azure/azure-sdk-for-java/pull/29738/commits/5782b42499bb19e384a1bc5caebda413ff5880da We'd have it via `AmqpReceiveLinkProcessor` having Id and having 1:1 relationship with `ReactorReceiver`. the id is propagated via the reactor `Context`. I also added a similar approach to other implementations of `CoreSubscriber`, but it doesn't cover all possible cases - we subscribe a lot, and propagate ids through all of these calls would be hard. But as far as `ChannelSubscriber` is concerned, I believe I covered receive case. LMK what you think and if it makes sense. | void onAdd() {
subscriberId = UUID.randomUUID().toString().substring(0, 6);
processor.logger.atVerbose()
.addKeyValue("subscriberId", subscriberId)
.log("Added subscriber.");
} | subscriberId = UUID.randomUUID().toString().substring(0, 6); | void onAdd() {
Object subscriberIdObj = actual.currentContext().getOrDefault(SUBSCRIBER_ID_KEY, null);
if (subscriberIdObj != null) {
subscriberId = subscriberIdObj.toString();
} else {
subscriberId = StringUtil.getRandomString("un");
}
processor.logger.atVerbose()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Added subscriber.");
} | class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private String subscriberId = null;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
@Override
public void cancel() {
processor.subscribers.remove(this);
super.cancel();
processor.logger.atVerbose()
.addKeyValue("subscriberId", subscriberId)
.log("Canceled subscriber");
}
@Override
public void onComplete() {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onComplete();
processor.logger.atInfo()
.addKeyValue("subscriberId", subscriberId)
.log("AMQP channel processor completed.");
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
processor.subscribers.remove(this);
super.complete(channel);
processor.logger.atInfo()
.addKeyValue("subscriberId", subscriberId)
.log("Next AMQP channel received.");
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo()
.addKeyValue("subscriberId", subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
}
} | class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private String subscriberId = null;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
@Override
public void cancel() {
processor.subscribers.remove(this);
super.cancel();
processor.logger.atVerbose()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Canceled subscriber");
}
@Override
public void onComplete() {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onComplete();
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("AMQP channel processor completed.");
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
processor.subscribers.remove(this);
super.complete(channel);
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Next AMQP channel received.");
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
}
} |
If the `cachedAddress` is empty, then also we can short circuit here. Not sure whether it will be empty or not. | private AddressInformation[] mergeAddresses(AddressInformation[] newAddresses, AddressInformation[] cachedAddresses) {
checkNotNull(newAddresses, "Argument 'newAddresses' should not be null");
if (cachedAddresses == null) {
return newAddresses;
}
List<AddressInformation> mergedAddresses = new ArrayList<>();
Map<Uri, AddressInformation> cachedAddressMap =
Arrays
.stream(cachedAddresses)
.collect(Collectors.toMap(address -> address.getPhysicalUri(), address -> address));
for (AddressInformation addressInformation : newAddresses) {
if (cachedAddressMap.containsKey(addressInformation.getPhysicalUri())) {
mergedAddresses.add(cachedAddressMap.get(addressInformation.getPhysicalUri()));
} else {
mergedAddresses.add(addressInformation);
}
}
return mergedAddresses.toArray(new AddressInformation[mergedAddresses.size()]);
} | if (cachedAddresses == null) { | private AddressInformation[] mergeAddresses(AddressInformation[] newAddresses, AddressInformation[] cachedAddresses) {
checkNotNull(newAddresses, "Argument 'newAddresses' should not be null");
if (cachedAddresses == null) {
return newAddresses;
}
List<AddressInformation> mergedAddresses = new ArrayList<>();
Map<Uri, List<AddressInformation>> cachedAddressMap =
Arrays
.stream(cachedAddresses)
.collect(Collectors.groupingBy(AddressInformation::getPhysicalUri));
for (AddressInformation newAddress : newAddresses) {
boolean useCachedAddress = false;
if (cachedAddressMap.containsKey(newAddress.getPhysicalUri())) {
for (AddressInformation cachedAddress : cachedAddressMap.get(newAddress.getPhysicalUri())) {
if (newAddress.getProtocol() == cachedAddress.getProtocol()
&& newAddress.isPublic() == cachedAddress.isPublic()
&& newAddress.isPrimary() == cachedAddress.isPrimary()) {
useCachedAddress = true;
mergedAddresses.add(cachedAddress);
break;
}
}
}
if (!useCachedAddress) {
mergedAddresses.add(newAddress);
}
}
return mergedAddresses.toArray(new AddressInformation[mergedAddresses.size()]);
} | class GatewayAddressCache implements IAddressCache {
private final static Duration minDurationBeforeEnforcingCollectionRoutingMapRefresh = Duration.ofSeconds(30);
private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class);
private final static String protocolFilterFormat = "%s eq %s";
private final static int DefaultBatchSize = 50;
private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600;
private final DiagnosticsClientContext clientContext;
private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true);
private final URI addressEndpoint;
private final AsyncCache<PartitionKeyRangeIdentity, AddressInformation[]> serverPartitionAddressCache;
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> suboptimalServerPartitionTimestamps;
private final long suboptimalPartitionForceRefreshIntervalInSeconds;
private final String protocolScheme;
private final String protocolFilter;
private final IAuthorizationTokenProvider tokenProvider;
private final HashMap<String, String> defaultRequestHeaders;
private final HttpClient httpClient;
private volatile Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterPartitionAddressCache;
private volatile Instant suboptimalMasterPartitionTimestamp;
private final ConcurrentHashMap<URI, Set<PartitionKeyRangeIdentity>> serverPartitionAddressToPkRangeIdMap;
private final boolean tcpConnectionEndpointRediscoveryEnabled;
private final ConcurrentHashMap<String, ForcedRefreshMetadata> lastForcedRefreshMap;
private final GlobalEndpointManager globalEndpointManager;
private IOpenConnectionsHandler openConnectionsHandler;
private final ConnectionPolicy connectionPolicy;
private final boolean replicaAddressValidationEnabled;
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
long suboptimalPartitionForceRefreshIntervalInSeconds,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
this.clientContext = clientContext;
try {
this.addressEndpoint = new URL(serviceEndpoint.toURL(), Paths.ADDRESS_PATH_SEGMENT).toURI();
} catch (MalformedURLException | URISyntaxException e) {
logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e);
assert false;
throw new IllegalStateException(e);
}
this.tokenProvider = tokenProvider;
this.serverPartitionAddressCache = new AsyncCache<>();
this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>();
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds;
this.protocolScheme = protocol.scheme();
this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat,
Constants.Properties.PROTOCOL,
this.protocolScheme);
this.httpClient = httpClient;
if (userAgent == null) {
userAgent = new UserAgentContainer();
}
defaultRequestHeaders = new HashMap<>();
defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent());
if(apiType != null) {
defaultRequestHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
this.serverPartitionAddressToPkRangeIdMap = new ConcurrentHashMap<>();
this.tcpConnectionEndpointRediscoveryEnabled = tcpConnectionEndpointRediscoveryEnabled;
this.lastForcedRefreshMap = new ConcurrentHashMap<>();
this.globalEndpointManager = globalEndpointManager;
this.openConnectionsHandler = openConnectionsHandler;
this.connectionPolicy = connectionPolicy;
this.replicaAddressValidationEnabled = Configs.isReplicaAddressValidationEnabled();
}
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
this(clientContext,
serviceEndpoint,
protocol,
tokenProvider,
userAgent,
httpClient,
DefaultSuboptimalPartitionForceRefreshIntervalInSeconds,
tcpConnectionEndpointRediscoveryEnabled,
apiType,
globalEndpointManager,
connectionPolicy,
openConnectionsHandler);
}
@Override
public int updateAddresses(final URI serverKey) {
Objects.requireNonNull(serverKey, "expected non-null serverKey");
AtomicInteger updatedCacheEntryCount = new AtomicInteger(0);
if (this.tcpConnectionEndpointRediscoveryEnabled) {
this.serverPartitionAddressToPkRangeIdMap.computeIfPresent(serverKey, (uri, partitionKeyRangeIdentitySet) -> {
for (PartitionKeyRangeIdentity partitionKeyRangeIdentity : partitionKeyRangeIdentitySet) {
if (partitionKeyRangeIdentity.getPartitionKeyRangeId().equals(PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
this.masterPartitionAddressCache = null;
} else {
this.serverPartitionAddressCache.remove(partitionKeyRangeIdentity);
}
updatedCacheEntryCount.incrementAndGet();
}
return null;
});
} else {
logger.warn("tcpConnectionEndpointRediscovery is not enabled, should not reach here.");
}
return updatedCacheEntryCount.get();
}
@Override
public Mono<Utils.ValueHolder<AddressInformation[]>> tryGetAddresses(RxDocumentServiceRequest request,
PartitionKeyRangeIdentity partitionKeyRangeIdentity,
boolean forceRefreshPartitionAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", "");
logger.debug("PartitionKeyRangeIdentity {}, forceRefreshPartitionAddresses {}",
partitionKeyRangeIdentity,
forceRefreshPartitionAddresses);
if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(),
PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties)
.map(partitionKeyRangeIdentityPair -> new Utils.ValueHolder<>(partitionKeyRangeIdentityPair.getRight()));
}
evaluateCollectionRoutingMapRefreshForServerPartition(
request, partitionKeyRangeIdentity, forceRefreshPartitionAddresses);
Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity);
if (suboptimalServerPartitionTimestamp != null) {
logger.debug("suboptimalServerPartitionTimestamp is {}", suboptimalServerPartitionTimestamp);
boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds()
> this.suboptimalPartitionForceRefreshIntervalInSeconds;
if (forceRefreshDueToSuboptimalPartitionReplicaSet) {
Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity,
(key, oldVal) -> {
logger.debug("key = {}, oldValue = {}", key, oldVal);
if (suboptimalServerPartitionTimestamp.equals(oldVal)) {
return Instant.MAX;
} else {
return oldVal;
}
});
logger.debug("newValue is {}", newValue);
if (!suboptimalServerPartitionTimestamp.equals(newValue)) {
logger.debug("setting forceRefreshPartitionAddresses to true");
forceRefreshPartitionAddresses = true;
}
}
}
final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses;
if (forceRefreshPartitionAddressesModified) {
logger.debug("refresh serverPartitionAddressCache for {}", partitionKeyRangeIdentity);
for (Uri uri : request.requestContext.getFailedEndpoints()) {
uri.setUnhealthy();
}
this.serverPartitionAddressCache.refreshWithInitFunction(
partitionKeyRangeIdentity,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
true,
cachedAddresses));
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
Mono<Utils.ValueHolder<AddressInformation[]>> addressesObs =
this.serverPartitionAddressCache
.getAsyncWithInitFunction(
partitionKeyRangeIdentity,
null,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
false,
cachedAddresses))
.map(Utils.ValueHolder::new);
return addressesObs
.map(
addressesValueHolder -> {
if (notAllReplicasAvailable(addressesValueHolder.v)) {
if (logger.isDebugEnabled()) {
logger.debug("not all replicas available {}", JavaStreamUtils.info(addressesValueHolder.v));
}
this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now());
}
if (Arrays
.stream(addressesValueHolder.v)
.anyMatch(addressInformation -> addressInformation.getPhysicalUri().shouldRefreshHealthStatus())) {
logger.info("refresh cache due to address uri in unhealthy status");
this.serverPartitionAddressCache.refreshWithInitFunction(
partitionKeyRangeIdentity,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
true,
cachedAddresses));
}
return addressesValueHolder;
})
.onErrorResume(ex -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(ex);
CosmosException dce = Utils.as(unwrappedException, CosmosException.class);
if (dce == null) {
logger.error("unexpected failure", ex);
if (forceRefreshPartitionAddressesModified) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
return Mono.error(unwrappedException);
} else {
logger.debug("tryGetAddresses dce", dce);
if (Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.NOTFOUND) ||
Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.GONE) ||
Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
logger.debug("tryGetAddresses: inner onErrorResumeNext return null", dce);
return Mono.just(new Utils.ValueHolder<>(null));
}
return Mono.error(unwrappedException);
}
});
}
@Override
public void setOpenConnectionsHandler(IOpenConnectionsHandler openConnectionsHandler) {
this.openConnectionsHandler = openConnectionsHandler;
}
/**
 * Fetches replica addresses for the given partition key range ids of a collection from the
 * gateway address endpoint over HTTP.
 *
 * @param request service request carrying auth context, diagnostics and refresh flags.
 * @param collectionRid resource id of the collection whose addresses are requested.
 * @param partitionKeyRangeIds partition key range ids to resolve (sent comma-joined in the query).
 * @param forceRefresh when true, asks the gateway (via header) to bypass its own address cache.
 * @return a Mono emitting the {@link Address} list returned by the gateway; failures are mapped
 *         to {@link CosmosException} carrying gateway read-timeout/unavailable sub-status codes.
 */
public Mono<List<Address>> getServerAddressesViaGatewayAsync(
RxDocumentServiceRequest request,
String collectionRid,
List<String> partitionKeyRangeIds,
boolean forceRefresh) {
if (logger.isDebugEnabled()) {
logger.debug("getServerAddressesViaGatewayAsync collectionRid {}, partitionKeyRangeIds {}", collectionRid,
JavaStreamUtils.toString(partitionKeyRangeIds, ","));
}
// Mark this as an address-refresh call so downstream request handling treats it accordingly.
request.setAddressRefresh(true, forceRefresh);
String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true);
HashMap<String, String> addressQuery = new HashMap<>();
addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
if (forceRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
}
if (request.forceCollectionRoutingMapRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
}
addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds));
headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Key / resource-token auth: the authorization header is computed synchronously here.
// AAD tokens are instead populated reactively just before the HTTP send (below).
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
String token = null;
try {
token = this.tokenProvider.getUserAuthorizationToken(
collectionRid,
ResourceType.Document,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
request.properties);
} catch (UnauthorizedException e) {
// Intentionally swallowed: a resource-token client may lack a token for the rid-based
// link; the name-based fallback below is attempted instead.
if (logger.isDebugEnabled()) {
logger.debug("User doesn't have resource token for collection rid {}", collectionRid);
}
}
if (token == null && request.getIsNameBased()) {
// Fallback: resolve the token using the collection's name-based (alt) link.
String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress());
token = this.tokenProvider.getUserAuthorizationToken(
collectionAltLink,
ResourceType.Document,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
request.properties);
}
token = HttpUtils.urlEncode(token);
headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
}
URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery));
String identifier = logAddressResolutionStart(
request, targetEndpoint, forceRefresh, request.forceCollectionRoutingMapRefresh);
HttpHeaders httpHeaders = new HttpHeaders(headers);
Instant addressCallStartTime = Instant.now();
HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders);
Mono<HttpResponse> httpResponseMono;
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
httpResponseMono = this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
} else {
// AAD: acquire/refresh the bearer token reactively, then send the request.
httpResponseMono = tokenProvider
.populateAuthorizationHeader(httpHeaders)
.flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
}
Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, clientContext, httpResponseMono, httpRequest);
return dsrObs.map(
dsr -> {
// Record the address lookup duration into request diagnostics, when attached.
MetadataDiagnosticsContext metadataDiagnosticsContext =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (metadataDiagnosticsContext != null) {
Instant addressCallEndTime = Instant.now();
MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
addressCallEndTime,
MetadataType.SERVER_ADDRESS_LOOKUP);
metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
}
if (logger.isDebugEnabled()) {
logger.debug("getServerAddressesViaGatewayAsync deserializes result");
}
logAddressResolutionEnd(request, identifier, null);
return dsr.getQueryResponse(null, Address.class);
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
logAddressResolutionEnd(request, identifier, unwrappedException.toString());
if (!(unwrappedException instanceof Exception)) {
// Non-Exception Throwables (Errors) are rethrown untouched.
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
// Wrap transport-level failures: read timeouts map to 408, other detected network
// failures to 503, anything else keeps status 0.
logger.error("Network failure", exception);
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(
request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
if (WebExceptionUtility.isNetworkFailure(dce)) {
// Tag gateway-endpoint sub-status so retry policies can distinguish timeout vs unavailable.
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
}
return Mono.error(dce);
});
}
// Intentionally a no-op: this block releases nothing today. Kept so owners can dispose all
// address caches uniformly on shutdown.
public void dispose() {
}
/**
 * Resolves the master (system) partition replica addresses, caching the result in
 * {@code masterPartitionAddressCache}.
 *
 * A refresh is forced when explicitly requested, or when the cached replica set has been
 * suboptimal (fewer replicas than expected) for longer than
 * {@code suboptimalPartitionForceRefreshIntervalInSeconds}.
 * {@code suboptimalMasterPartitionTimestamp == Instant.MAX} encodes "not currently suboptimal".
 *
 * @param request the service request used for the gateway call.
 * @param forceRefresh whether to bypass the cached master addresses.
 * @param properties request properties forwarded to token resolution.
 * @return a Mono emitting the (range identity, addresses) pair for the master partition.
 */
private Mono<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map<String, Object> properties) {
logger.debug("resolveMasterAsync forceRefresh: {}", forceRefresh);
// Snapshot the volatile cache once so the suboptimal check and the return path agree.
Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeInitial = this.masterPartitionAddressCache;
forceRefresh = forceRefresh ||
(masterAddressAndRangeInitial != null &&
notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) &&
Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds);
if (forceRefresh || this.masterPartitionAddressCache == null) {
Mono<List<Address>> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync(
request,
ResourceType.Database,
null,
databaseFeedEntryUrl,
forceRefresh,
false,
properties);
return masterReplicaAddressesObs.map(
masterAddresses -> {
Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeRes =
this.toPartitionAddressAndRange("", masterAddresses);
this.masterPartitionAddressCache = masterAddressAndRangeRes;
// Start (or clear) the suboptimal clock depending on the fresh replica count.
if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight())
&& this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
this.suboptimalMasterPartitionTimestamp = Instant.now();
} else {
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
}
return masterPartitionAddressCache;
})
.doOnError(
e -> {
// On failure, reset the suboptimal clock so the next call is not force-refreshed
// purely because of this error.
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
});
} else {
// Serving from cache: still start the suboptimal clock if the cached set is short.
if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight())
&& this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
this.suboptimalMasterPartitionTimestamp = Instant.now();
}
return Mono.just(masterAddressAndRangeInitial);
}
}
/**
 * Decides whether a forced partition address refresh should be escalated to also force a
 * collection routing map refresh, based on per-collection {@link ForcedRefreshMetadata}
 * bookkeeping, and records the refresh signal for future decisions.
 *
 * Side effect: may set {@code request.forceCollectionRoutingMapRefresh = true}.
 */
private void evaluateCollectionRoutingMapRefreshForServerPartition(
RxDocumentServiceRequest request,
PartitionKeyRangeIdentity pkRangeIdentity,
boolean forceRefreshPartitionAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
validatePkRangeIdentity(pkRangeIdentity);
String collectionRid = pkRangeIdentity.getCollectionRid();
String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
if (forceRefreshPartitionAddresses) {
ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
collectionRid,
(colRid) -> new ForcedRefreshMetadata());
if (request.forceCollectionRoutingMapRefresh) {
// Caller already asked for a routing map refresh; just record both signals.
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
true);
} else if (forcedRefreshMetadata.shouldIncludeCollectionRoutingMapRefresh(pkRangeIdentity)) {
// Repeated address-only refreshes for this range since the last routing map refresh:
// escalate to a routing map refresh as well.
request.forceCollectionRoutingMapRefresh = true;
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
true);
} else {
forcedRefreshMetadata.signalPartitionAddressOnlyRefresh(pkRangeIdentity);
}
} else if (request.forceCollectionRoutingMapRefresh) {
ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
collectionRid,
(colRid) -> new ForcedRefreshMetadata());
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
false);
}
logger.debug("evaluateCollectionRoutingMapRefreshForServerPartition collectionRid {}, partitionKeyRangeId {},"
+ " " +
"forceRefreshPartitionAddresses {}, forceCollectionRoutingMapRefresh {}",
collectionRid,
partitionKeyRangeId,
forceRefreshPartitionAddresses,
request.forceCollectionRoutingMapRefresh);
}
/**
 * Validates that a partition key range identity and both of its components are non-null.
 * Fails fast on the identity itself before dereferencing it.
 */
private void validatePkRangeIdentity(PartitionKeyRangeIdentity pkRangeIdentity) {
    Utils.checkNotNullOrThrow(pkRangeIdentity, "pkRangeId", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getCollectionRid(), "pkRangeId.getCollectionRid()", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getPartitionKeyRangeId(), "pkRangeId.getPartitionKeyRangeId()", "");
}
/**
 * Resolves replica addresses for a single partition key range by querying the gateway and
 * merging the response with previously cached addresses (so cached Uri instances and their
 * health state survive a refresh).
 *
 * @param request the service request driving the gateway call (auth, diagnostics).
 * @param pkRangeIdentity collection rid + partition key range id to resolve.
 * @param forceRefresh whether the gateway should bypass its own address cache.
 * @param cachedAddresses previously cached addresses to merge with.
 * @return a Mono emitting the merged address array, or erroring with
 *         {@link PartitionKeyRangeGoneException} when the gateway no longer knows the range.
 */
private Mono<AddressInformation[]> getAddressesForRangeId(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefresh,
    AddressInformation[] cachedAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);
    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
    logger.debug(
        "getAddressesForRangeId collectionRid {}, partitionKeyRangeId {}, forceRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefresh);
    Mono<List<Address>> addressResponse = this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh);
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> addressInfos =
        addressResponse.map(
            addresses -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("addresses from getServerAddressesViaGatewayAsync in getAddressesForRangeId {}",
                        JavaStreamUtils.info(addresses));
                }
                return addresses
                    .stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream()
                    // FIX: pass the per-range group. Previously the whole raw 'addresses' list
                    // was passed here, which discarded both the protocol-scheme filter and the
                    // grouping (compare with the same pipeline in openConnectionsAndInitCaches,
                    // which correctly uses the grouped list).
                    .map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, groupedAddresses))
                    .collect(Collectors.toList());
            });
    // Keep only the pair matching the requested partition key range id.
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> result =
        addressInfos
            .map(addressInfo -> addressInfo.stream()
                .filter(a -> StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId))
                .collect(Collectors.toList()));
    return result
        .flatMap(
            list -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("getAddressesForRangeId flatMap got result {}", JavaStreamUtils.info(list));
                }
                if (list.isEmpty()) {
                    // The gateway no longer returns this range: signal PartitionKeyRangeGone so
                    // callers refresh their collection routing map.
                    String errorMessage = String.format(
                        RMResources.PartitionKeyRangeNotFound,
                        partitionKeyRangeId,
                        collectionRid);
                    PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage);
                    BridgeInternal.setResourceAddress(e, collectionRid);
                    return Mono.error(e);
                } else {
                    AddressInformation[] mergedAddresses = this.mergeAddresses(list.get(0).getRight(), cachedAddresses);
                    // Mark every merged Uri as freshly refreshed.
                    for (AddressInformation address : mergedAddresses) {
                        address.getPhysicalUri().setRefreshed();
                    }
                    if (this.replicaAddressValidationEnabled) {
                        this.validateReplicaAddresses(mergedAddresses);
                    }
                    return Mono.just(mergedAddresses);
                }
            })
        .doOnError(e -> logger.debug("getAddressesForRangeId", e));
}
/**
 * Fetches the master (system) partition replica addresses from the gateway address endpoint.
 *
 * @param request service request carrying diagnostics and refresh flags.
 * @param resourceType resource type used for token resolution.
 * @param resourceAddress resource address used for token resolution (may be null).
 * @param entryUrl the feed entry url placed in the query string.
 * @param forceRefresh asks the gateway to bypass its own cache.
 * @param useMasterCollectionResolver sets the corresponding gateway header when true.
 * @param properties request properties forwarded to token resolution.
 * @return a Mono emitting the {@link Address} list; failures are mapped to
 *         {@link CosmosException} with gateway sub-status codes (mirrors
 *         getServerAddressesViaGatewayAsync).
 */
public Mono<List<Address>> getMasterAddressesViaGatewayAsync(
RxDocumentServiceRequest request,
ResourceType resourceType,
String resourceAddress,
String entryUrl,
boolean forceRefresh,
boolean useMasterCollectionResolver,
Map<String, Object> properties) {
logger.debug("getMasterAddressesViaGatewayAsync " +
"resourceType {}, " +
"resourceAddress {}, " +
"entryUrl {}, " +
"forceRefresh {}, " +
"useMasterCollectionResolver {}",
resourceType,
resourceAddress,
entryUrl,
forceRefresh,
useMasterCollectionResolver
);
request.setAddressRefresh(true, forceRefresh);
HashMap<String, String> queryParameters = new HashMap<>();
queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
if (forceRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
}
if (useMasterCollectionResolver) {
headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, "true");
}
if(request.forceCollectionRoutingMapRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
}
queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Key / resource-token auth resolves the authorization header synchronously; AAD tokens are
// populated reactively right before the HTTP send below.
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
String token = this.tokenProvider.getUserAuthorizationToken(
resourceAddress,
resourceType,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
properties);
headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token));
}
URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters));
String identifier = logAddressResolutionStart(
request, targetEndpoint, true, true);
HttpHeaders defaultHttpHeaders = new HttpHeaders(headers);
HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), defaultHttpHeaders);
Instant addressCallStartTime = Instant.now();
Mono<HttpResponse> httpResponseMono;
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
httpResponseMono = this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
} else {
// AAD: acquire/refresh the bearer token reactively, then send.
httpResponseMono = tokenProvider
.populateAuthorizationHeader(defaultHttpHeaders)
.flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
}
Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, this.clientContext, httpResponseMono, httpRequest);
return dsrObs.map(
dsr -> {
// Record the master address lookup duration in diagnostics, when attached.
MetadataDiagnosticsContext metadataDiagnosticsContext =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (metadataDiagnosticsContext != null) {
Instant addressCallEndTime = Instant.now();
MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
addressCallEndTime,
MetadataType.MASTER_ADDRESS_LOOK_UP);
metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
}
logAddressResolutionEnd(request, identifier, null);
return dsr.getQueryResponse(null, Address.class);
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
logAddressResolutionEnd(request, identifier, unwrappedException.toString());
if (!(unwrappedException instanceof Exception)) {
// Non-Exception Throwables (Errors) are rethrown untouched.
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
// Wrap transport failures: read timeout -> 408, other network failure -> 503, else 0.
logger.error("Network failure", exception);
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(
request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
}
return Mono.error(dce);
});
}
/***
 * NOTE(review): this Javadoc documents {@code mergeAddresses(newAddresses, cachedAddresses)}
 * but currently sits above {@code validateReplicaAddresses}; it should be moved onto the
 * {@code mergeAddresses} declaration.
 *
 * Merges the new addresses returned from the gateway with the cached addresses:
 * if an address is returned by the gateway again, keep using the cached AddressInformation
 * object; if it is a newly returned address, use the new AddressInformation object.
 *
 * @param newAddresses the latest addresses returned from the gateway.
 * @param cachedAddresses the cached addresses.
 *
 * @return the merged addresses.
 */
/**
 * Kicks off background connection validation for every replica whose Uri health status is
 * {@code UnhealthyPending}, by asking the open-connections handler to open connections to them.
 * Fire-and-forget: the validation runs on the open-connections scheduler and its outcome is not
 * awaited here.
 *
 * @param addresses the replica addresses to inspect; must not be null.
 */
private void validateReplicaAddresses(AddressInformation[] addresses) {
    checkNotNull(addresses, "Argument 'addresses' can not be null");
    List<Uri> addressesNeedToValidation =
        Arrays
            .stream(addresses)
            .map(address -> address.getPhysicalUri())
            .filter(addressUri -> addressUri.getHealthStatus() == Uri.HealthStatus.UnhealthyPending)
            .collect(Collectors.toList());
    if (addressesNeedToValidation.size() > 0) {
        // FIX: guard against a null handler (mirrors the null check in
        // openConnectionsAndInitCaches); previously this would throw NPE when replica address
        // validation was enabled before a handler was set.
        if (this.openConnectionsHandler == null) {
            logger.info("OpenConnectionHandler is null, can not validate replica addresses");
            return;
        }
        this.openConnectionsHandler
            .openConnections(addressesNeedToValidation)
            .subscribeOn(CosmosSchedulers.OPEN_CONNECTIONS_BOUNDED_ELASTIC)
            .subscribe();
    }
}
/**
 * Converts a gateway address list (expected to belong to a single partition key range — the
 * range id is taken from the first element) into a (PartitionKeyRangeIdentity,
 * AddressInformation[]) pair. When TCP endpoint rediscovery is enabled, also records the
 * reverse server-key -> range-identity mapping used by updateAddresses.
 */
private Pair<PartitionKeyRangeIdentity, AddressInformation[]> toPartitionAddressAndRange(String collectionRid, List<Address> addresses) {
    if (logger.isDebugEnabled()) {
        logger.debug("toPartitionAddressAndRange");
    }
    PartitionKeyRangeIdentity partitionKeyRangeIdentity =
        new PartitionKeyRangeIdentity(collectionRid, addresses.get(0).getParitionKeyRangeId());
    AddressInformation[] addressInfos = addresses
        .stream()
        .map(GatewayAddressCache::toAddressInformation)
        .toArray(AddressInformation[]::new);
    if (this.tcpConnectionEndpointRediscoveryEnabled) {
        for (AddressInformation addressInfo : addressInfos) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Added address to serverPartitionAddressToPkRangeIdMap: ({\"partitionKeyRangeIdentity\":{},\"address\":{}})",
                    partitionKeyRangeIdentity,
                    addressInfo);
            }
            // Atomic under the map's per-key lock: create the set on first use, then add.
            this.serverPartitionAddressToPkRangeIdMap.compute(addressInfo.getServerKey(), (serverKey, rangeIdentities) -> {
                Set<PartitionKeyRangeIdentity> updated =
                    rangeIdentities != null ? rangeIdentities : ConcurrentHashMap.newKeySet();
                updated.add(partitionKeyRangeIdentity);
                return updated;
            });
        }
    }
    return Pair.of(partitionKeyRangeIdentity, addressInfos);
}
// Adapts a gateway Address payload into a transport-level AddressInformation.
// The first constructor argument is hard-coded true here — presumably an "isPublic"-style flag;
// confirm against the AddressInformation constructor. Note getPhyicalUri is the (misspelled)
// project API name, not a typo introduced here.
private static AddressInformation toAddressInformation(Address address) {
return new AddressInformation(true, address.isPrimary(), address.getPhyicalUri(), address.getProtocolScheme());
}
/**
 * Warms up the address cache for the given partition key ranges of a collection and, when an
 * open-connections handler is configured, proactively opens connections to the resolved
 * replicas. Gateway lookups are issued in batches of {@code DefaultBatchSize} range ids with
 * retry (see getServerAddressesViaGatewayWithRetry).
 *
 * @param collection the target collection; must not be null.
 * @param partitionKeyRangeIdentities the ranges to warm up; must not be null.
 * @return a Flux of open-connection responses (empty per batch when no handler is set).
 */
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(
DocumentCollection collection,
List<PartitionKeyRangeIdentity> partitionKeyRangeIdentities) {
checkNotNull(collection, "Argument 'collection' should not be null");
checkNotNull(partitionKeyRangeIdentities, "Argument 'partitionKeyRangeIdentities' should not be null");
if (logger.isDebugEnabled()) {
logger.debug(
"openConnectionsAndInitCaches collection: {}, partitionKeyRangeIdentities: {}",
collection.getResourceId(),
JavaStreamUtils.toString(partitionKeyRangeIdentities, ","));
}
List<Flux<List<Address>>> tasks = new ArrayList<>();
int batchSize = GatewayAddressCache.DefaultBatchSize;
// A single synthetic read request drives auth/diagnostics for all the batched lookups.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this.clientContext,
OperationType.Read,
collection.getResourceId(),
ResourceType.DocumentCollection,
Collections.emptyMap());
// Slice the range ids into batches of at most batchSize per gateway call.
for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) {
int endIndex = i + batchSize;
endIndex = Math.min(endIndex, partitionKeyRangeIdentities.size());
tasks.add(
this.getServerAddressesViaGatewayWithRetry(
request,
collection.getResourceId(),
partitionKeyRangeIdentities
.subList(i, endIndex)
.stream()
.map(PartitionKeyRangeIdentity::getPartitionKeyRangeId)
.collect(Collectors.toList()),
false).flux());
}
return Flux.concat(tasks)
.flatMap(list -> {
// Group the batch response by range id and convert each group into a cacheable pair.
List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> addressInfos =
list.stream()
.filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
.collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
.values()
.stream().map(addresses -> toPartitionAddressAndRange(collection.getResourceId(), addresses))
.collect(Collectors.toList());
return Flux.fromIterable(addressInfos)
.flatMap(addressInfo -> {
// Seed the server partition address cache regardless of connection warm-up.
this.serverPartitionAddressCache.set(addressInfo.getLeft(), addressInfo.getRight());
if (this.openConnectionsHandler != null) {
return this.openConnectionsHandler.openConnections(
Arrays
.stream(addressInfo.getRight())
.map(addressInformation -> addressInformation.getPhysicalUri())
.collect(Collectors.toList()));
}
logger.info("OpenConnectionHandler is null, can not open connections");
return Flux.empty();
});
});
}
/**
 * Same as getServerAddressesViaGatewayAsync, but retried under the
 * open-connection/init-caches backoff policy derived from the client's throttling retry options.
 */
private Mono<List<Address>> getServerAddressesViaGatewayWithRetry(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {
    final OpenConnectionAndInitCachesRetryPolicy retryPolicy =
        new OpenConnectionAndInitCachesRetryPolicy(this.connectionPolicy.getThrottlingRetryOptions());
    return BackoffRetryUtility.executeRetry(
        () -> getServerAddressesViaGatewayAsync(request, collectionRid, partitionKeyRangeIds, forceRefresh),
        retryPolicy);
}
/**
 * Returns true when fewer replica addresses were resolved than the system replication policy's
 * maximum replica set size — i.e. the partition's replica set looks suboptimal and is worth
 * refreshing again later.
 */
private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) {
    final int expectedReplicaCount = ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize;
    return addressInformations.length < expectedReplicaCount;
}
/**
 * Records the start of an address resolution call into the request diagnostics.
 *
 * @return an identifier to pass to logAddressResolutionEnd, or null when the request carries no
 *         diagnostics context.
 */
private static String logAddressResolutionStart(
    RxDocumentServiceRequest request,
    URI targetEndpointUrl,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {
    if (request.requestContext.cosmosDiagnostics == null) {
        return null;
    }
    return BridgeInternal.recordAddressResolutionStart(
        request.requestContext.cosmosDiagnostics,
        targetEndpointUrl,
        forceRefresh,
        forceCollectionRoutingMapRefresh);
}
/**
 * Records the end of an address resolution call (identified by the token returned from
 * logAddressResolutionStart) into the request diagnostics; no-op when diagnostics are absent.
 */
private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier, String errorMessage) {
    if (request.requestContext.cosmosDiagnostics == null) {
        return;
    }
    BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosDiagnostics, identifier, errorMessage);
}
// Per-collection bookkeeping of forced refreshes, used to decide when repeated address-only
// refreshes for a range should be escalated to a collection routing map refresh.
// NOTE(review): lastCollectionRoutingMapRefresh is a plain (non-volatile) field read/written
// from lambdas on reactive threads — presumably benign staleness is acceptable; confirm.
private static class ForcedRefreshMetadata {
// range identity -> instant of its last address-only forced refresh.
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> lastPartitionAddressOnlyRefresh;
// Instant of the last forced collection routing map refresh for this collection.
private Instant lastCollectionRoutingMapRefresh;
public ForcedRefreshMetadata() {
lastPartitionAddressOnlyRefresh = new ConcurrentHashMap<>();
lastCollectionRoutingMapRefresh = Instant.now();
}
// Records a routing map refresh; when forcePartitionAddressRefresh is set, the same instant is
// also recorded as the range's last address refresh.
public void signalCollectionRoutingMapRefresh(
PartitionKeyRangeIdentity pk,
boolean forcePartitionAddressRefresh) {
// Single snapshot so both timestamps agree when recorded together.
Instant nowSnapshot = Instant.now();
if (forcePartitionAddressRefresh) {
lastPartitionAddressOnlyRefresh.put(pk, nowSnapshot);
}
lastCollectionRoutingMapRefresh = nowSnapshot;
}
public void signalPartitionAddressOnlyRefresh(PartitionKeyRangeIdentity pk) {
lastPartitionAddressOnlyRefresh.put(pk, Instant.now());
}
// True when the range had an address-only refresh AFTER the last routing map refresh, and at
// least minDurationBeforeEnforcingCollectionRoutingMapRefresh has elapsed since that routing
// map refresh — i.e. address refreshes alone are apparently not resolving the problem.
public boolean shouldIncludeCollectionRoutingMapRefresh(PartitionKeyRangeIdentity pk) {
// Snapshot both timestamps once to get a consistent comparison.
Instant lastPartitionAddressRefreshSnapshot = lastPartitionAddressOnlyRefresh.get(pk);
Instant lastCollectionRoutingMapRefreshSnapshot = lastCollectionRoutingMapRefresh;
if (lastPartitionAddressRefreshSnapshot == null ||
!lastPartitionAddressRefreshSnapshot.isAfter(lastCollectionRoutingMapRefreshSnapshot)) {
return false;
}
Duration durationSinceLastForcedCollectionRoutingMapRefresh =
Duration.between(lastCollectionRoutingMapRefreshSnapshot, Instant.now());
boolean returnValue = durationSinceLastForcedCollectionRoutingMapRefresh
.compareTo(minDurationBeforeEnforcingCollectionRoutingMapRefresh) >= 0;
return returnValue;
}
}
} | class GatewayAddressCache implements IAddressCache {
// Minimum spacing between forced collection routing map refreshes for a collection.
private final static Duration minDurationBeforeEnforcingCollectionRoutingMapRefresh = Duration.ofSeconds(30);
private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class);
// Filter template ("%s eq %s") used to restrict gateway address responses to one protocol.
private final static String protocolFilterFormat = "%s eq %s";
// Number of partition key range ids requested per gateway call in openConnectionsAndInitCaches.
private final static int DefaultBatchSize = 50;
// Default interval (seconds) after which a suboptimal replica set forces a refresh.
private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600;
private final DiagnosticsClientContext clientContext;
private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true);
// Gateway address endpoint derived from the service endpoint in the constructor.
private final URI addressEndpoint;
// Main cache: partition key range identity -> resolved replica addresses.
private final AsyncCacheNonBlocking<PartitionKeyRangeIdentity, AddressInformation[]> serverPartitionAddressCache;
// When a range was last observed with fewer replicas than expected (see notAllReplicasAvailable).
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> suboptimalServerPartitionTimestamps;
private final long suboptimalPartitionForceRefreshIntervalInSeconds;
private final String protocolScheme;
private final String protocolFilter;
private final IAuthorizationTokenProvider tokenProvider;
private final HashMap<String, String> defaultRequestHeaders;
private final HttpClient httpClient;
// Cached master (system) partition addresses; null until first resolution.
private volatile Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterPartitionAddressCache;
// Instant.MAX encodes "master replica set not currently suboptimal".
private volatile Instant suboptimalMasterPartitionTimestamp;
// Reverse map server URI -> ranges hosted there; used by updateAddresses during rediscovery.
private final ConcurrentHashMap<URI, Set<PartitionKeyRangeIdentity>> serverPartitionAddressToPkRangeIdMap;
private final boolean tcpConnectionEndpointRediscoveryEnabled;
// Per-collection forced-refresh bookkeeping (see ForcedRefreshMetadata).
private final ConcurrentHashMap<String, ForcedRefreshMetadata> lastForcedRefreshMap;
private final GlobalEndpointManager globalEndpointManager;
// Mutable: may be (re)assigned via setOpenConnectionsHandler after construction.
private IOpenConnectionsHandler openConnectionsHandler;
private final ConnectionPolicy connectionPolicy;
private final boolean replicaAddressValidationEnabled;
/**
 * Creates a gateway address cache bound to the given service endpoint.
 *
 * @param suboptimalPartitionForceRefreshIntervalInSeconds how long a suboptimal replica set may
 *        be served from cache before a refresh is forced.
 * @throws IllegalStateException when the service endpoint cannot be turned into the gateway
 *         address endpoint URI.
 */
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
long suboptimalPartitionForceRefreshIntervalInSeconds,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
this.clientContext = clientContext;
try {
// Derive the gateway's address resolution endpoint from the account endpoint.
this.addressEndpoint = new URL(serviceEndpoint.toURL(), Paths.ADDRESS_PATH_SEGMENT).toURI();
} catch (MalformedURLException | URISyntaxException e) {
logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e);
assert false;
throw new IllegalStateException(e);
}
this.tokenProvider = tokenProvider;
this.serverPartitionAddressCache = new AsyncCacheNonBlocking<>();
this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>();
// Instant.MAX means "master partition not currently suboptimal".
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds;
this.protocolScheme = protocol.scheme();
this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat,
Constants.Properties.PROTOCOL,
this.protocolScheme);
this.httpClient = httpClient;
if (userAgent == null) {
userAgent = new UserAgentContainer();
}
// Headers sent with every address resolution request.
defaultRequestHeaders = new HashMap<>();
defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent());
if(apiType != null) {
defaultRequestHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
this.serverPartitionAddressToPkRangeIdMap = new ConcurrentHashMap<>();
this.tcpConnectionEndpointRediscoveryEnabled = tcpConnectionEndpointRediscoveryEnabled;
this.lastForcedRefreshMap = new ConcurrentHashMap<>();
this.globalEndpointManager = globalEndpointManager;
this.openConnectionsHandler = openConnectionsHandler;
this.connectionPolicy = connectionPolicy;
this.replicaAddressValidationEnabled = Configs.isReplicaAddressValidationEnabled();
}
/**
 * Convenience overload that uses the default suboptimal-partition force refresh interval
 * ({@code DefaultSuboptimalPartitionForceRefreshIntervalInSeconds}, 600s).
 */
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
this(clientContext,
serviceEndpoint,
protocol,
tokenProvider,
userAgent,
httpClient,
DefaultSuboptimalPartitionForceRefreshIntervalInSeconds,
tcpConnectionEndpointRediscoveryEnabled,
apiType,
globalEndpointManager,
connectionPolicy,
openConnectionsHandler);
}
/**
 * Invalidates cached addresses for every partition key range recorded as living on the given
 * server (used by TCP connection endpoint rediscovery when a server endpoint changes).
 * The whole map entry for serverKey is removed (computeIfPresent returns null).
 *
 * @param serverKey the server URI whose cached ranges must be dropped; must not be null.
 * @return the number of cache entries invalidated (0 when rediscovery is disabled).
 */
@Override
public int updateAddresses(final URI serverKey) {
Objects.requireNonNull(serverKey, "expected non-null serverKey");
AtomicInteger updatedCacheEntryCount = new AtomicInteger(0);
if (this.tcpConnectionEndpointRediscoveryEnabled) {
this.serverPartitionAddressToPkRangeIdMap.computeIfPresent(serverKey, (uri, partitionKeyRangeIdentitySet) -> {
for (PartitionKeyRangeIdentity partitionKeyRangeIdentity : partitionKeyRangeIdentitySet) {
// The master partition is cached in a dedicated field, not in the async cache.
if (partitionKeyRangeIdentity.getPartitionKeyRangeId().equals(PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
this.masterPartitionAddressCache = null;
} else {
this.serverPartitionAddressCache.remove(partitionKeyRangeIdentity);
}
updatedCacheEntryCount.incrementAndGet();
}
// Returning null removes the serverKey entry from the reverse map.
return null;
});
} else {
logger.warn("tcpConnectionEndpointRediscovery is not enabled, should not reach here.");
}
return updatedCacheEntryCount.get();
}
/**
 * Resolves replica addresses for the given partition key range, serving from the async cache
 * when possible.
 *
 * Master partition requests are delegated to resolveMasterAsync. For server partitions a
 * refresh is forced when the caller asks for it, when the cached replica set has been
 * suboptimal for longer than the configured interval, or when any cached replica Uri reports an
 * unhealthy status. NOTFOUND/GONE/PARTITION_KEY_RANGE_GONE failures are mapped to an empty
 * ValueHolder (null addresses) so callers can trigger routing map refreshes.
 *
 * @return a Mono emitting a ValueHolder with the addresses, or holding null on "range gone".
 */
@Override
public Mono<Utils.ValueHolder<AddressInformation[]>> tryGetAddresses(RxDocumentServiceRequest request,
PartitionKeyRangeIdentity partitionKeyRangeIdentity,
boolean forceRefreshPartitionAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", "");
logger.debug("PartitionKeyRangeIdentity {}, forceRefreshPartitionAddresses {}",
partitionKeyRangeIdentity,
forceRefreshPartitionAddresses);
if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(),
PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
// Master partition has its own resolution/caching path.
return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties)
.map(partitionKeyRangeIdentityPair -> new Utils.ValueHolder<>(partitionKeyRangeIdentityPair.getRight()));
}
// May set request.forceCollectionRoutingMapRefresh as a side effect.
evaluateCollectionRoutingMapRefreshForServerPartition(
request, partitionKeyRangeIdentity, forceRefreshPartitionAddresses);
Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity);
if (suboptimalServerPartitionTimestamp != null) {
logger.debug("suboptimalServerPartitionTimestamp is {}", suboptimalServerPartitionTimestamp);
boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds()
> this.suboptimalPartitionForceRefreshIntervalInSeconds;
if (forceRefreshDueToSuboptimalPartitionReplicaSet) {
// CAS-style update: only the thread that flips the timestamp to Instant.MAX wins the
// right to force the refresh, so concurrent callers don't all refresh at once.
Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity,
(key, oldVal) -> {
logger.debug("key = {}, oldValue = {}", key, oldVal);
if (suboptimalServerPartitionTimestamp.equals(oldVal)) {
return Instant.MAX;
} else {
return oldVal;
}
});
logger.debug("newValue is {}", newValue);
if (!suboptimalServerPartitionTimestamp.equals(newValue)) {
logger.debug("setting forceRefreshPartitionAddresses to true");
forceRefreshPartitionAddresses = true;
}
}
}
// Effectively-final copy for use inside the lambdas below.
final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses;
if (forceRefreshPartitionAddressesModified) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
Mono<Utils.ValueHolder<AddressInformation[]>> addressesObs =
this.serverPartitionAddressCache
.getAsync(
partitionKeyRangeIdentity,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
forceRefreshPartitionAddressesModified,
cachedAddresses),
cachedAddresses -> {
// Refresh predicate: mark this request's failed endpoints unhealthy first, then
// refresh when forced or when any cached Uri reports it needs a health re-check.
for (Uri failedEndpoints : request.requestContext.getFailedEndpoints()) {
failedEndpoints.setUnhealthy();
}
return forceRefreshPartitionAddressesModified
|| Arrays.stream(cachedAddresses).anyMatch(addressInformation -> addressInformation.getPhysicalUri().shouldRefreshHealthStatus());
})
.map(Utils.ValueHolder::new);
return addressesObs
.map(addressesValueHolder -> {
// Track partitions that resolved fewer replicas than expected.
if (notAllReplicasAvailable(addressesValueHolder.v)) {
if (logger.isDebugEnabled()) {
logger.debug("not all replicas available {}", JavaStreamUtils.info(addressesValueHolder.v));
}
this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now());
}
return addressesValueHolder;
})
.onErrorResume(ex -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(ex);
CosmosException dce = Utils.as(unwrappedException, CosmosException.class);
if (dce == null) {
logger.error("unexpected failure", ex);
if (forceRefreshPartitionAddressesModified) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
return Mono.error(unwrappedException);
} else {
logger.debug("tryGetAddresses dce", dce);
// "Range gone"-style failures surface as a null-holding ValueHolder, signalling the
// caller to refresh routing information instead of failing outright.
if (Exceptions.isNotFound(dce) ||
Exceptions.isGone(dce) ||
Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
logger.debug("tryGetAddresses: inner onErrorResumeNext return null", dce);
return Mono.just(new Utils.ValueHolder<>(null));
}
return Mono.error(unwrappedException);
}
});
}
/**
 * Sets the handler used to proactively open connections to replica endpoints
 * (consumed by validateReplicaAddresses and openConnectionsAndInitCaches).
 */
@Override
public void setOpenConnectionsHandler(IOpenConnectionsHandler openConnectionsHandler) {
    this.openConnectionsHandler = openConnectionsHandler;
}
/**
 * Resolves the replica addresses for the given partition key ranges of a collection by
 * calling the gateway's address endpoint.
 *
 * @param request the request being resolved; its diagnostics context (when present) is
 *                updated with a SERVER_ADDRESS_LOOKUP metadata event.
 * @param collectionRid resource id of the collection whose addresses are requested.
 * @param partitionKeyRangeIds range ids to resolve, sent as one comma-separated query value.
 * @param forceRefresh when true, asks the gateway to bypass its own address cache.
 * @return a Mono emitting the Address entries returned by the gateway; failures surface as
 *         CosmosException (network failures mapped to 408/503 with a gateway sub-status).
 */
public Mono<List<Address>> getServerAddressesViaGatewayAsync(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {
    if (logger.isDebugEnabled()) {
        logger.debug("getServerAddressesViaGatewayAsync collectionRid {}, partitionKeyRangeIds {}", collectionRid,
            JavaStreamUtils.toString(partitionKeyRangeIds, ","));
    }
    // Flag the request as an address-refresh call for downstream policies/diagnostics.
    request.setAddressRefresh(true, forceRefresh);
    String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true);
    HashMap<String, String> addressQuery = new HashMap<>();
    addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
    HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
    if (forceRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
    }
    if (request.forceCollectionRoutingMapRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
    }
    // Restrict results to the protocol (see protocolFilter, built in the constructor).
    addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
    addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds));
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Non-AAD auth: compute the authorization header up front. AAD tokens are populated
    // asynchronously just before the HTTP call further below.
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        String token = null;
        try {
            token = this.tokenProvider.getUserAuthorizationToken(
                collectionRid,
                ResourceType.Document,
                RequestVerb.GET,
                headers,
                AuthorizationTokenType.PrimaryMasterKey,
                request.properties);
        } catch (UnauthorizedException e) {
            // Deliberately swallowed: a resource-token client may lack a token for the
            // rid-based link; the name-based fallback below is tried next.
            if (logger.isDebugEnabled()) {
                logger.debug("User doesn't have resource token for collection rid {}", collectionRid);
            }
        }
        if (token == null && request.getIsNameBased()) {
            // Name-based request: retry token lookup with the collection's alt-link.
            String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress());
            token = this.tokenProvider.getUserAuthorizationToken(
                collectionAltLink,
                ResourceType.Document,
                RequestVerb.GET,
                headers,
                AuthorizationTokenType.PrimaryMasterKey,
                request.properties);
        }
        // NOTE(review): token can still be null here (rid lookup failed and request is not
        // name-based) — confirm HttpUtils.urlEncode tolerates null input.
        token = HttpUtils.urlEncode(token);
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
    }
    URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery));
    String identifier = logAddressResolutionStart(
        request, targetEndpoint, forceRefresh, request.forceCollectionRoutingMapRefresh);
    HttpHeaders httpHeaders = new HttpHeaders(headers);
    Instant addressCallStartTime = Instant.now();
    HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders);
    Mono<HttpResponse> httpResponseMono;
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        httpResponseMono = this.httpClient.send(httpRequest,
            Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
    } else {
        // AAD: acquire/refresh the bearer token first, then issue the request.
        httpResponseMono = tokenProvider
            .populateAuthorizationHeader(httpHeaders)
            .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
    }
    Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, clientContext, httpResponseMono, httpRequest);
    return dsrObs.map(
        dsr -> {
            // Record the lookup duration in the request's diagnostics, if any.
            MetadataDiagnosticsContext metadataDiagnosticsContext =
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
            if (metadataDiagnosticsContext != null) {
                Instant addressCallEndTime = Instant.now();
                MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                    addressCallEndTime,
                    MetadataType.SERVER_ADDRESS_LOOKUP);
                metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
            }
            if (logger.isDebugEnabled()) {
                logger.debug("getServerAddressesViaGatewayAsync deserializes result");
            }
            logAddressResolutionEnd(request, identifier, null);
            return dsr.getQueryResponse(null, Address.class);
        }).onErrorResume(throwable -> {
            Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
            logAddressResolutionEnd(request, identifier, unwrappedException.toString());
            if (!(unwrappedException instanceof Exception)) {
                // Non-Exception Throwables (e.g. Error) are rethrown untouched.
                logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                return Mono.error(unwrappedException);
            }
            Exception exception = (Exception) unwrappedException;
            CosmosException dce;
            if (!(exception instanceof CosmosException)) {
                // Wrap transport failures: read timeout -> 408, other network failure -> 503,
                // anything else keeps status code 0.
                logger.error("Network failure", exception);
                int statusCode = 0;
                if (WebExceptionUtility.isNetworkFailure(exception)) {
                    if (WebExceptionUtility.isReadTimeoutException(exception)) {
                        statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                    } else {
                        statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                    }
                }
                dce = BridgeInternal.createCosmosException(
                    request.requestContext.resourcePhysicalAddress, statusCode, exception);
                BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            } else {
                dce = (CosmosException) exception;
            }
            if (WebExceptionUtility.isNetworkFailure(dce)) {
                // Distinguish gateway read-timeout from general unavailability via sub-status.
                if (WebExceptionUtility.isReadTimeoutException(dce)) {
                    BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                } else {
                    BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                }
            }
            if (request.requestContext.cosmosDiagnostics != null) {
                BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
            }
            return Mono.error(dce);
        });
}
/**
 * No-op. This cache owns no closable resources of its own; the HttpClient is injected and
 * therefore presumably closed by its owner — TODO confirm.
 */
public void dispose() {
}
/**
 * Resolves the replica addresses of the master (system) partition, using the in-memory
 * masterPartitionAddressCache when possible.
 *
 * A gateway refresh is performed when the caller forces it, when nothing is cached yet, or
 * when the cached replica set has been suboptimal (see notAllReplicasAvailable) for longer
 * than suboptimalPartitionForceRefreshIntervalInSeconds.
 *
 * @return a Mono emitting the (master pk-range identity, addresses) pair.
 */
private Mono<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map<String, Object> properties) {
    logger.debug("resolveMasterAsync forceRefresh: {}", forceRefresh);
    // Snapshot the volatile cache once so all checks below see a consistent value.
    Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeInitial = this.masterPartitionAddressCache;
    forceRefresh = forceRefresh ||
        (masterAddressAndRangeInitial != null &&
            notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) &&
            Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds);
    if (forceRefresh || this.masterPartitionAddressCache == null) {
        Mono<List<Address>> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync(
            request,
            ResourceType.Database,
            null,
            databaseFeedEntryUrl,
            forceRefresh,
            false,
            properties);
        return masterReplicaAddressesObs.map(
            masterAddresses -> {
                Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeRes =
                    this.toPartitionAddressAndRange("", masterAddresses);
                this.masterPartitionAddressCache = masterAddressAndRangeRes;
                // Start the suboptimal timer if the fresh set is still missing replicas
                // (Instant.MAX means "not tracking"); otherwise reset it.
                if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight())
                    && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
                    this.suboptimalMasterPartitionTimestamp = Instant.now();
                } else {
                    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
                }
                return masterPartitionAddressCache;
            })
            .doOnError(
                e -> {
                    // On failure stop tracking so the next call doesn't force-refresh early.
                    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
                });
    } else {
        // Serving from cache: begin tracking suboptimality if not already doing so.
        if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight())
            && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
            this.suboptimalMasterPartitionTimestamp = Instant.now();
        }
        return Mono.just(masterAddressAndRangeInitial);
    }
}
/**
 * Decides whether a forced partition-address refresh should also force a collection routing
 * map refresh, based on per-collection bookkeeping in lastForcedRefreshMap.
 *
 * Side effect: may set request.forceCollectionRoutingMapRefresh = true.
 */
private void evaluateCollectionRoutingMapRefreshForServerPartition(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefreshPartitionAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);
    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
    if (forceRefreshPartitionAddresses) {
        ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
            collectionRid,
            (colRid) -> new ForcedRefreshMetadata());
        if (request.forceCollectionRoutingMapRefresh) {
            // Caller already asked for a routing map refresh — just record it.
            forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
                pkRangeIdentity,
                true);
        } else if (forcedRefreshMetadata.shouldIncludeCollectionRoutingMapRefresh(pkRangeIdentity)) {
            // Address-only refreshes haven't helped for a while — escalate to a routing
            // map refresh for this request.
            request.forceCollectionRoutingMapRefresh = true;
            forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
                pkRangeIdentity,
                true);
        } else {
            forcedRefreshMetadata.signalPartitionAddressOnlyRefresh(pkRangeIdentity);
        }
    } else if (request.forceCollectionRoutingMapRefresh) {
        // Routing-map-only refresh: record it without counting a partition address refresh.
        ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
            collectionRid,
            (colRid) -> new ForcedRefreshMetadata());
        forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
            pkRangeIdentity,
            false);
    }
    logger.debug("evaluateCollectionRoutingMapRefreshForServerPartition collectionRid {}, partitionKeyRangeId {},"
        + " " +
        "forceRefreshPartitionAddresses {}, forceCollectionRoutingMapRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefreshPartitionAddresses,
        request.forceCollectionRoutingMapRefresh);
}
/**
 * Validates that the partition key range identity and both of its components (collection rid,
 * partition key range id) are non-null; Utils.checkNotNullOrThrow throws otherwise.
 */
private void validatePkRangeIdentity(PartitionKeyRangeIdentity pkRangeIdentity) {
    Utils.checkNotNullOrThrow(pkRangeIdentity, "pkRangeId", "");
    Utils.checkNotNullOrThrow(
        pkRangeIdentity.getCollectionRid(),
        "pkRangeId.getCollectionRid()",
        "");
    Utils.checkNotNullOrThrow(
        pkRangeIdentity.getPartitionKeyRangeId(),
        "pkRangeId.getPartitionKeyRangeId()",
        "");
}
/**
 * Resolves (via the gateway) the replica addresses for a single partition key range and
 * merges them with the previously cached addresses so health state on unchanged replicas
 * is preserved.
 *
 * @param request the request driving the resolution (used for auth and diagnostics).
 * @param pkRangeIdentity identifies the collection rid + partition key range id to resolve.
 * @param forceRefresh whether the gateway should bypass its own cache.
 * @param cachedAddresses the previously cached addresses for this range, or null.
 * @return a Mono emitting the merged addresses; fails with PartitionKeyRangeGoneException
 *         when the gateway no longer returns the requested range (e.g. after a split).
 */
private Mono<AddressInformation[]> getAddressesForRangeId(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefresh,
    AddressInformation[] cachedAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);
    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
    logger.debug(
        "getAddressesForRangeId collectionRid {}, partitionKeyRangeId {}, forceRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefresh);
    Mono<List<Address>> addressResponse = this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh);
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> addressInfos =
        addressResponse.map(
            addresses -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("addresses from getServerAddressesViaGatewayAsync in getAddressesForRangeId {}",
                        JavaStreamUtils.info(addresses));
                }
                return addresses
                    .stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream()
                    // BUGFIX: convert each protocol-filtered group, not the raw 'addresses'
                    // list. The previous code passed 'addresses' here, which leaked entries of
                    // other protocol schemes into the result and made the filter above a no-op
                    // (compare the correct grouped usage in openConnectionsAndInitCaches).
                    .map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, groupedAddresses))
                    .collect(Collectors.toList());
            });
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> result =
        addressInfos
            .map(addressInfo -> addressInfo.stream()
                .filter(a -> StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId))
                .collect(Collectors.toList()));
    return result
        .flatMap(
            list -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("getAddressesForRangeId flatMap got result {}", JavaStreamUtils.info(list));
                }
                if (list.isEmpty()) {
                    // Requested range no longer exists (split/merge): signal PKRangeGone so
                    // callers refresh their routing map.
                    String errorMessage = String.format(
                        RMResources.PartitionKeyRangeNotFound,
                        partitionKeyRangeId,
                        collectionRid);
                    PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage);
                    BridgeInternal.setResourceAddress(e, collectionRid);
                    return Mono.error(e);
                } else {
                    // Reuse cached AddressInformation objects for unchanged replicas so their
                    // health status survives the refresh.
                    AddressInformation[] mergedAddresses = this.mergeAddresses(list.get(0).getRight(), cachedAddresses);
                    for (AddressInformation address : mergedAddresses) {
                        address.getPhysicalUri().setRefreshed();
                    }
                    if (this.replicaAddressValidationEnabled) {
                        this.validateReplicaAddresses(mergedAddresses);
                    }
                    return Mono.just(mergedAddresses);
                }
            })
        .doOnError(e -> logger.debug("getAddressesForRangeId", e));
}
/**
 * Resolves the master (system) partition's replica addresses via the gateway address
 * endpoint.
 *
 * @param request the request being resolved; diagnostics (when present) get a
 *                MASTER_ADDRESS_LOOK_UP metadata event.
 * @param resourceType resource type used for token acquisition.
 * @param resourceAddress resource address used for token acquisition (may be null).
 * @param entryUrl the feed entry url placed in the query string.
 * @param forceRefresh asks the gateway to bypass its own cache.
 * @param useMasterCollectionResolver adds the corresponding gateway header when true.
 * @param properties passed through to token acquisition.
 * @return a Mono emitting the gateway's Address entries; failures surface as CosmosException
 *         (network failures mapped to 408/503 with a gateway sub-status).
 */
public Mono<List<Address>> getMasterAddressesViaGatewayAsync(
    RxDocumentServiceRequest request,
    ResourceType resourceType,
    String resourceAddress,
    String entryUrl,
    boolean forceRefresh,
    boolean useMasterCollectionResolver,
    Map<String, Object> properties) {
    logger.debug("getMasterAddressesViaGatewayAsync " +
        "resourceType {}, " +
        "resourceAddress {}, " +
        "entryUrl {}, " +
        "forceRefresh {}, " +
        "useMasterCollectionResolver {}",
        resourceType,
        resourceAddress,
        entryUrl,
        forceRefresh,
        useMasterCollectionResolver
    );
    request.setAddressRefresh(true, forceRefresh);
    HashMap<String, String> queryParameters = new HashMap<>();
    queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
    HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
    if (forceRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
    }
    if (useMasterCollectionResolver) {
        headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, "true");
    }
    if(request.forceCollectionRoutingMapRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
    }
    // Restrict results to this cache's protocol.
    queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Non-AAD: authorization header computed synchronously; AAD is populated right before
    // the HTTP call below.
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        String token = this.tokenProvider.getUserAuthorizationToken(
            resourceAddress,
            resourceType,
            RequestVerb.GET,
            headers,
            AuthorizationTokenType.PrimaryMasterKey,
            properties);
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token));
    }
    URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters));
    // NOTE(review): both refresh flags are recorded as 'true' unconditionally here, unlike
    // getServerAddressesViaGatewayAsync which passes the actual values — confirm intended.
    String identifier = logAddressResolutionStart(
        request, targetEndpoint, true, true);
    HttpHeaders defaultHttpHeaders = new HttpHeaders(headers);
    HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), defaultHttpHeaders);
    Instant addressCallStartTime = Instant.now();
    Mono<HttpResponse> httpResponseMono;
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        httpResponseMono = this.httpClient.send(httpRequest,
            Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
    } else {
        httpResponseMono = tokenProvider
            .populateAuthorizationHeader(defaultHttpHeaders)
            .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
    }
    Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, this.clientContext, httpResponseMono, httpRequest);
    return dsrObs.map(
        dsr -> {
            // Record the master address lookup duration in the request diagnostics, if any.
            MetadataDiagnosticsContext metadataDiagnosticsContext =
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
            if (metadataDiagnosticsContext != null) {
                Instant addressCallEndTime = Instant.now();
                MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                    addressCallEndTime,
                    MetadataType.MASTER_ADDRESS_LOOK_UP);
                metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
            }
            logAddressResolutionEnd(request, identifier, null);
            return dsr.getQueryResponse(null, Address.class);
        }).onErrorResume(throwable -> {
            Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
            logAddressResolutionEnd(request, identifier, unwrappedException.toString());
            if (!(unwrappedException instanceof Exception)) {
                // Non-Exception Throwables are rethrown untouched.
                logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                return Mono.error(unwrappedException);
            }
            Exception exception = (Exception) unwrappedException;
            CosmosException dce;
            if (!(exception instanceof CosmosException)) {
                // Wrap transport failures: read timeout -> 408, other network failure -> 503.
                logger.error("Network failure", exception);
                int statusCode = 0;
                if (WebExceptionUtility.isNetworkFailure(exception)) {
                    if (WebExceptionUtility.isReadTimeoutException(exception)) {
                        statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                    } else {
                        statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                    }
                }
                dce = BridgeInternal.createCosmosException(
                    request.requestContext.resourcePhysicalAddress, statusCode, exception);
                BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            } else {
                dce = (CosmosException) exception;
            }
            if (WebExceptionUtility.isNetworkFailure(dce)) {
                if (WebExceptionUtility.isReadTimeoutException(dce)) {
                    BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                } else {
                    BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                }
            }
            if (request.requestContext.cosmosDiagnostics != null) {
                BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
            }
            return Mono.error(dce);
        });
}
/***
 * merge the new addresses get back from gateway with the cached addresses.
 * If the address is being returned from gateway again, then keep using the cached addressInformation object.
 * If it is a new address being returned, then use the new addressInformation object.
 *
 * @param newAddresses the latest addresses being returned from gateway.
 * @param cachedAddresses the cached addresses.
 *
 * @return the merged addresses.
 *
 * NOTE(review): this javadoc documents mergeAddresses(...), but it is attached to
 * validateReplicaAddresses(...) below — move it directly above mergeAddresses.
 */
/**
 * Kicks off background validation for every replica URI whose health status is
 * UnhealthyPending, by asking the open-connections handler to (re)open connections to them
 * on the dedicated bounded-elastic scheduler. Fire-and-forget: results are not observed here.
 */
private void validateReplicaAddresses(AddressInformation[] addresses) {
    checkNotNull(addresses, "Argument 'addresses' can not be null");

    List<Uri> urisPendingValidation = Arrays
        .stream(addresses)
        .map(AddressInformation::getPhysicalUri)
        .filter(uri -> uri.getHealthStatus() == Uri.HealthStatus.UnhealthyPending)
        .collect(Collectors.toList());

    if (!urisPendingValidation.isEmpty()) {
        this.openConnectionsHandler
            .openConnections(urisPendingValidation)
            .subscribeOn(CosmosSchedulers.OPEN_CONNECTIONS_BOUNDED_ELASTIC)
            .subscribe();
    }
}
/**
 * Converts the raw gateway Address entries (all belonging to one partition key range) into a
 * (PartitionKeyRangeIdentity, AddressInformation[]) pair and, when TCP endpoint rediscovery
 * is enabled, records each server key in the reverse lookup map used by updateAddresses.
 */
private Pair<PartitionKeyRangeIdentity, AddressInformation[]> toPartitionAddressAndRange(String collectionRid, List<Address> addresses) {
    if (logger.isDebugEnabled()) {
        logger.debug("toPartitionAddressAndRange");
    }

    // All entries share the same partition key range id, so the first one is representative.
    PartitionKeyRangeIdentity partitionKeyRangeIdentity =
        new PartitionKeyRangeIdentity(collectionRid, addresses.get(0).getParitionKeyRangeId());

    AddressInformation[] addressInfos = addresses
        .stream()
        .map(GatewayAddressCache::toAddressInformation)
        .toArray(AddressInformation[]::new);

    if (this.tcpConnectionEndpointRediscoveryEnabled) {
        for (AddressInformation addressInfo : addressInfos) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Added address to serverPartitionAddressToPkRangeIdMap: ({\"partitionKeyRangeIdentity\":{},\"address\":{}})",
                    partitionKeyRangeIdentity,
                    addressInfo);
            }

            // Maintain serverKey -> set of pk-range identities; the value set is concurrent.
            this.serverPartitionAddressToPkRangeIdMap.compute(addressInfo.getServerKey(), (serverKey, pkRangeIdentities) -> {
                if (pkRangeIdentities == null) {
                    pkRangeIdentities = ConcurrentHashMap.newKeySet();
                }
                pkRangeIdentities.add(partitionKeyRangeIdentity);
                return pkRangeIdentities;
            });
        }
    }

    return Pair.of(partitionKeyRangeIdentity, addressInfos);
}
/**
 * Converts one gateway Address payload into an AddressInformation (first argument 'true'
 * is passed verbatim to the AddressInformation constructor).
 * Note: getPhyicalUri is the (misspelled) accessor name on Address — do not "fix" it here.
 */
private static AddressInformation toAddressInformation(Address address) {
    return new AddressInformation(true, address.isPrimary(), address.getPhyicalUri(), address.getProtocolScheme());
}
/**
 * Warms up this cache and proactively opens connections for the given partition key ranges
 * of a collection: addresses are fetched from the gateway in batches of DefaultBatchSize
 * (with throttling-aware retries), stored into serverPartitionAddressCache, and their URIs
 * handed to the open-connections handler.
 *
 * @param collection the collection being warmed up.
 * @param partitionKeyRangeIdentities the ranges to resolve and connect to.
 * @return a Flux of per-connection open results; empty for a batch when no handler is set.
 */
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(
    DocumentCollection collection,
    List<PartitionKeyRangeIdentity> partitionKeyRangeIdentities) {
    checkNotNull(collection, "Argument 'collection' should not be null");
    checkNotNull(partitionKeyRangeIdentities, "Argument 'partitionKeyRangeIdentities' should not be null");
    if (logger.isDebugEnabled()) {
        logger.debug(
            "openConnectionsAndInitCaches collection: {}, partitionKeyRangeIdentities: {}",
            collection.getResourceId(),
            JavaStreamUtils.toString(partitionKeyRangeIdentities, ","));
    }
    List<Flux<List<Address>>> tasks = new ArrayList<>();
    int batchSize = GatewayAddressCache.DefaultBatchSize;
    // One synthetic read request drives auth/diagnostics for all the address lookups.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this.clientContext,
        OperationType.Read,
        collection.getResourceId(),
        ResourceType.DocumentCollection,
        Collections.emptyMap());
    // Slice the range ids into fixed-size batches, one gateway call per batch.
    for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) {
        int endIndex = i + batchSize;
        endIndex = Math.min(endIndex, partitionKeyRangeIdentities.size());
        tasks.add(
            this.getServerAddressesViaGatewayWithRetry(
                request,
                collection.getResourceId(),
                partitionKeyRangeIdentities
                    .subList(i, endIndex)
                    .stream()
                    .map(PartitionKeyRangeIdentity::getPartitionKeyRangeId)
                    .collect(Collectors.toList()),
                false).flux());
    }
    return Flux.concat(tasks)
        .flatMap(list -> {
            // Group each batch by partition key range (keeping only this cache's protocol),
            // seed the async address cache, then open connections if a handler is present.
            List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> addressInfos =
                list.stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream().map(addresses -> toPartitionAddressAndRange(collection.getResourceId(), addresses))
                    .collect(Collectors.toList());
            return Flux.fromIterable(addressInfos)
                .flatMap(addressInfo -> {
                    this.serverPartitionAddressCache.set(addressInfo.getLeft(), addressInfo.getRight());
                    if (this.openConnectionsHandler != null) {
                        return this.openConnectionsHandler.openConnections(
                            Arrays
                                .stream(addressInfo.getRight())
                                .map(addressInformation -> addressInformation.getPhysicalUri())
                                .collect(Collectors.toList()));
                    }
                    logger.info("OpenConnectionHandler is null, can not open connections");
                    return Flux.empty();
                });
        });
}
/**
 * Same as getServerAddressesViaGatewayAsync, but wrapped in the throttling-aware
 * OpenConnectionAndInitCachesRetryPolicy (used by the warm-up path).
 */
private Mono<List<Address>> getServerAddressesViaGatewayWithRetry(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {
    OpenConnectionAndInitCachesRetryPolicy openConnectionAndInitCachesRetryPolicy =
        new OpenConnectionAndInitCachesRetryPolicy(this.connectionPolicy.getThrottlingRetryOptions());
    return BackoffRetryUtility.executeRetry(
        () -> this.getServerAddressesViaGatewayAsync(request, collectionRid, partitionKeyRangeIds, forceRefresh),
        openConnectionAndInitCachesRetryPolicy);
}
/**
 * True when fewer replica addresses were resolved than the system replication policy's
 * maximum replica set size — i.e. the replica set is suboptimal and worth refreshing later.
 */
private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) {
    return addressInformations.length < ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize;
}
/**
 * Records the start of an address-resolution call in the request's diagnostics.
 *
 * @return an identifier correlating this start event with logAddressResolutionEnd, or null
 *         when the request carries no diagnostics context.
 */
private static String logAddressResolutionStart(
    RxDocumentServiceRequest request,
    URI targetEndpointUrl,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {

    // No diagnostics attached — nothing to record, no correlation id.
    if (request.requestContext.cosmosDiagnostics == null) {
        return null;
    }

    return BridgeInternal.recordAddressResolutionStart(
        request.requestContext.cosmosDiagnostics,
        targetEndpointUrl,
        forceRefresh,
        forceCollectionRoutingMapRefresh);
}
/**
 * Records the completion (or failure, when errorMessage != null) of the address-resolution
 * call identified by logAddressResolutionStart's return value; no-op without diagnostics.
 */
private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier, String errorMessage) {
    if (request.requestContext.cosmosDiagnostics != null) {
        BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosDiagnostics, identifier, errorMessage);
    }
}
/**
 * Per-collection bookkeeping about forced refreshes, used by
 * evaluateCollectionRoutingMapRefreshForServerPartition to decide when repeated
 * partition-address-only refreshes should escalate into a collection routing map refresh.
 *
 * Thread-safety: lastPartitionAddressOnlyRefresh is a concurrent map;
 * lastCollectionRoutingMapRefresh is a plain field read/written from multiple threads —
 * NOTE(review): consider volatile; confirm the tolerated staleness is intentional.
 */
private static class ForcedRefreshMetadata {
    // Last address-only forced refresh per partition key range.
    private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> lastPartitionAddressOnlyRefresh;
    // Last time a collection routing map refresh was recorded for this collection.
    private Instant lastCollectionRoutingMapRefresh;
    public ForcedRefreshMetadata() {
        lastPartitionAddressOnlyRefresh = new ConcurrentHashMap<>();
        lastCollectionRoutingMapRefresh = Instant.now();
    }
    /** Records a routing map refresh; optionally also counts as a partition address refresh. */
    public void signalCollectionRoutingMapRefresh(
        PartitionKeyRangeIdentity pk,
        boolean forcePartitionAddressRefresh) {
        Instant nowSnapshot = Instant.now();
        if (forcePartitionAddressRefresh) {
            lastPartitionAddressOnlyRefresh.put(pk, nowSnapshot);
        }
        lastCollectionRoutingMapRefresh = nowSnapshot;
    }
    /** Records an address-only forced refresh for the given partition key range. */
    public void signalPartitionAddressOnlyRefresh(PartitionKeyRangeIdentity pk) {
        lastPartitionAddressOnlyRefresh.put(pk, Instant.now());
    }
    /**
     * True when this range had an address-only refresh AFTER the last routing map refresh
     * AND at least minDurationBeforeEnforcingCollectionRoutingMapRefresh has elapsed since
     * that routing map refresh — i.e. address refreshes alone aren't resolving the problem.
     */
    public boolean shouldIncludeCollectionRoutingMapRefresh(PartitionKeyRangeIdentity pk) {
        Instant lastPartitionAddressRefreshSnapshot = lastPartitionAddressOnlyRefresh.get(pk);
        Instant lastCollectionRoutingMapRefreshSnapshot = lastCollectionRoutingMapRefresh;
        if (lastPartitionAddressRefreshSnapshot == null ||
            !lastPartitionAddressRefreshSnapshot.isAfter(lastCollectionRoutingMapRefreshSnapshot)) {
            return false;
        }
        Duration durationSinceLastForcedCollectionRoutingMapRefresh =
            Duration.between(lastCollectionRoutingMapRefreshSnapshot, Instant.now());
        boolean returnValue = durationSinceLastForcedCollectionRoutingMapRefresh
            .compareTo(minDurationBeforeEnforcingCollectionRoutingMapRefresh) >= 0;
        return returnValue;
    }
}
} |
Instead of creating a new array, I think we can use the existing map to keep track of new addresses. ``` Map<Uri, AddressInformation> cachedAddressMap = Arrays .stream(cachedAddresses) .collect(Collectors.toMap(address -> address.getPhysicalUri(), address -> address)); for (AddressInformation addressInformation : newAddresses) { if (!cachedAddressMap.containsKey(addressInformation.getPhysicalUri())) { cachedAddressMap.put(addressInformation.getPhysicalUri(), addressInformation); } } return cachedAddressMap.values().toArray(new AddressInformation[0]); ``` | private AddressInformation[] mergeAddresses(AddressInformation[] newAddresses, AddressInformation[] cachedAddresses) {
checkNotNull(newAddresses, "Argument 'newAddresses' should not be null");
if (cachedAddresses == null) {
return newAddresses;
}
List<AddressInformation> mergedAddresses = new ArrayList<>();
Map<Uri, AddressInformation> cachedAddressMap =
Arrays
.stream(cachedAddresses)
.collect(Collectors.toMap(address -> address.getPhysicalUri(), address -> address));
for (AddressInformation addressInformation : newAddresses) {
if (cachedAddressMap.containsKey(addressInformation.getPhysicalUri())) {
mergedAddresses.add(cachedAddressMap.get(addressInformation.getPhysicalUri()));
} else {
mergedAddresses.add(addressInformation);
}
}
return mergedAddresses.toArray(new AddressInformation[mergedAddresses.size()]);
} | return mergedAddresses.toArray(new AddressInformation[mergedAddresses.size()]); | private AddressInformation[] mergeAddresses(AddressInformation[] newAddresses, AddressInformation[] cachedAddresses) {
checkNotNull(newAddresses, "Argument 'newAddresses' should not be null");
if (cachedAddresses == null) {
return newAddresses;
}
List<AddressInformation> mergedAddresses = new ArrayList<>();
Map<Uri, List<AddressInformation>> cachedAddressMap =
Arrays
.stream(cachedAddresses)
.collect(Collectors.groupingBy(AddressInformation::getPhysicalUri));
for (AddressInformation newAddress : newAddresses) {
boolean useCachedAddress = false;
if (cachedAddressMap.containsKey(newAddress.getPhysicalUri())) {
for (AddressInformation cachedAddress : cachedAddressMap.get(newAddress.getPhysicalUri())) {
if (newAddress.getProtocol() == cachedAddress.getProtocol()
&& newAddress.isPublic() == cachedAddress.isPublic()
&& newAddress.isPrimary() == cachedAddress.isPrimary()) {
useCachedAddress = true;
mergedAddresses.add(cachedAddress);
break;
}
}
}
if (!useCachedAddress) {
mergedAddresses.add(newAddress);
}
}
return mergedAddresses.toArray(new AddressInformation[mergedAddresses.size()]);
} | class GatewayAddressCache implements IAddressCache {
// Minimum gap between forced collection routing map refreshes triggered by this cache.
private final static Duration minDurationBeforeEnforcingCollectionRoutingMapRefresh = Duration.ofSeconds(30);
private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class);
// Filter template, instantiated in the constructor as e.g. "<PROTOCOL> eq <scheme>".
private final static String protocolFilterFormat = "%s eq %s";
// Batch size for warm-up address fetches in openConnectionsAndInitCaches.
private final static int DefaultBatchSize = 50;
// Default window (seconds) before a suboptimal replica set forces a refresh.
private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600;
private final DiagnosticsClientContext clientContext;
private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true);
// Gateway address endpoint derived from the account's service endpoint.
private final URI addressEndpoint;
// Async cache of replica addresses per server partition key range.
private final AsyncCache<PartitionKeyRangeIdentity, AddressInformation[]> serverPartitionAddressCache;
// For each range whose replica set came back suboptimal: since when.
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> suboptimalServerPartitionTimestamps;
private final long suboptimalPartitionForceRefreshIntervalInSeconds;
private final String protocolScheme;
private final String protocolFilter;
private final IAuthorizationTokenProvider tokenProvider;
private final HashMap<String, String> defaultRequestHeaders;
private final HttpClient httpClient;
// Master (system) partition addresses are cached separately from the AsyncCache above.
private volatile Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterPartitionAddressCache;
// Instant.MAX means "no suboptimal period currently tracked" for the master partition.
private volatile Instant suboptimalMasterPartitionTimestamp;
// Reverse map serverKey -> pk-range identities; used by updateAddresses (TCP rediscovery).
private final ConcurrentHashMap<URI, Set<PartitionKeyRangeIdentity>> serverPartitionAddressToPkRangeIdMap;
private final boolean tcpConnectionEndpointRediscoveryEnabled;
// Per-collection forced-refresh bookkeeping (see ForcedRefreshMetadata).
private final ConcurrentHashMap<String, ForcedRefreshMetadata> lastForcedRefreshMap;
private final GlobalEndpointManager globalEndpointManager;
private IOpenConnectionsHandler openConnectionsHandler;
private final ConnectionPolicy connectionPolicy;
private final boolean replicaAddressValidationEnabled;
/**
 * Creates a gateway-backed address cache for one service endpoint.
 *
 * @param suboptimalPartitionForceRefreshIntervalInSeconds how long a partition may stay with
 *        fewer replicas than expected before a refresh is forced.
 * @throws IllegalStateException if the service endpoint cannot be combined with the
 *         addresses path segment into a valid URI.
 */
public GatewayAddressCache(
    DiagnosticsClientContext clientContext,
    URI serviceEndpoint,
    Protocol protocol,
    IAuthorizationTokenProvider tokenProvider,
    UserAgentContainer userAgent,
    HttpClient httpClient,
    long suboptimalPartitionForceRefreshIntervalInSeconds,
    boolean tcpConnectionEndpointRediscoveryEnabled,
    ApiType apiType,
    GlobalEndpointManager globalEndpointManager,
    ConnectionPolicy connectionPolicy,
    IOpenConnectionsHandler openConnectionsHandler) {
    this.clientContext = clientContext;
    try {
        // Derive the gateway address endpoint from the account's service endpoint.
        this.addressEndpoint = new URL(serviceEndpoint.toURL(), Paths.ADDRESS_PATH_SEGMENT).toURI();
    } catch (MalformedURLException | URISyntaxException e) {
        logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e);
        assert false;
        throw new IllegalStateException(e);
    }
    this.tokenProvider = tokenProvider;
    this.serverPartitionAddressCache = new AsyncCache<>();
    this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>();
    // Instant.MAX = "no suboptimal period currently tracked".
    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
    this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds;
    this.protocolScheme = protocol.scheme();
    this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat,
        Constants.Properties.PROTOCOL,
        this.protocolScheme);
    this.httpClient = httpClient;
    if (userAgent == null) {
        userAgent = new UserAgentContainer();
    }
    defaultRequestHeaders = new HashMap<>();
    defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent());
    if(apiType != null) {
        defaultRequestHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
    }
    defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
    this.serverPartitionAddressToPkRangeIdMap = new ConcurrentHashMap<>();
    this.tcpConnectionEndpointRediscoveryEnabled = tcpConnectionEndpointRediscoveryEnabled;
    this.lastForcedRefreshMap = new ConcurrentHashMap<>();
    this.globalEndpointManager = globalEndpointManager;
    this.openConnectionsHandler = openConnectionsHandler;
    this.connectionPolicy = connectionPolicy;
    this.replicaAddressValidationEnabled = Configs.isReplicaAddressValidationEnabled();
}
/**
 * Convenience overload delegating to the full constructor with the default
 * suboptimal-partition force-refresh interval (600 seconds).
 */
public GatewayAddressCache(
    DiagnosticsClientContext clientContext,
    URI serviceEndpoint,
    Protocol protocol,
    IAuthorizationTokenProvider tokenProvider,
    UserAgentContainer userAgent,
    HttpClient httpClient,
    boolean tcpConnectionEndpointRediscoveryEnabled,
    ApiType apiType,
    GlobalEndpointManager globalEndpointManager,
    ConnectionPolicy connectionPolicy,
    IOpenConnectionsHandler openConnectionsHandler) {
    this(clientContext,
        serviceEndpoint,
        protocol,
        tokenProvider,
        userAgent,
        httpClient,
        DefaultSuboptimalPartitionForceRefreshIntervalInSeconds,
        tcpConnectionEndpointRediscoveryEnabled,
        apiType,
        globalEndpointManager,
        connectionPolicy,
        openConnectionsHandler);
}
/**
 * Evicts every cached address entry that lives on the given server; called by TCP endpoint
 * rediscovery when a connection to that server is reset.
 *
 * @param serverKey the server URI key whose cached ranges should be evicted.
 * @return the number of cache entries evicted (0 when rediscovery is disabled).
 */
@Override
public int updateAddresses(final URI serverKey) {
    Objects.requireNonNull(serverKey, "expected non-null serverKey");
    AtomicInteger updatedCacheEntryCount = new AtomicInteger(0);
    if (this.tcpConnectionEndpointRediscoveryEnabled) {
        // Atomically drain the reverse mapping for this server key.
        this.serverPartitionAddressToPkRangeIdMap.computeIfPresent(serverKey, (uri, partitionKeyRangeIdentitySet) -> {
            for (PartitionKeyRangeIdentity partitionKeyRangeIdentity : partitionKeyRangeIdentitySet) {
                if (partitionKeyRangeIdentity.getPartitionKeyRangeId().equals(PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
                    this.masterPartitionAddressCache = null;
                } else {
                    this.serverPartitionAddressCache.remove(partitionKeyRangeIdentity);
                }
                updatedCacheEntryCount.incrementAndGet();
            }
            // Returning null removes the entry from the map.
            return null;
        });
    } else {
        logger.warn("tcpConnectionEndpointRediscovery is not enabled, should not reach here.");
    }
    return updatedCacheEntryCount.get();
}
/**
 * Resolves replica addresses for the given partition key range, serving from the
 * per-partition async cache where possible.
 *
 * Master-range requests are routed to {@link #resolveMasterAsync}. For server partitions,
 * a forced refresh is triggered when the caller asks for it or when the cached replica set
 * has been suboptimal (incomplete) for longer than the configured interval. NOTFOUND / GONE /
 * PARTITION_KEY_RANGE_GONE failures resolve to a ValueHolder wrapping null so callers can
 * re-resolve; other failures propagate as errors.
 */
@Override
public Mono<Utils.ValueHolder<AddressInformation[]>> tryGetAddresses(RxDocumentServiceRequest request,
PartitionKeyRangeIdentity partitionKeyRangeIdentity,
boolean forceRefreshPartitionAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", "");
logger.debug("PartitionKeyRangeIdentity {}, forceRefreshPartitionAddresses {}",
partitionKeyRangeIdentity,
forceRefreshPartitionAddresses);
// Master partition addresses are cached and refreshed separately from server partitions.
if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(),
PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties)
.map(partitionKeyRangeIdentityPair -> new Utils.ValueHolder<>(partitionKeyRangeIdentityPair.getRight()));
}
// May escalate this address refresh into a collection routing map refresh as well.
evaluateCollectionRoutingMapRefreshForServerPartition(
request, partitionKeyRangeIdentity, forceRefreshPartitionAddresses);
// If this partition's replica set has been incomplete ("suboptimal") for longer than the
// configured interval, force a refresh even though the caller did not request one.
Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity);
if (suboptimalServerPartitionTimestamp != null) {
logger.debug("suboptimalServerPartitionTimestamp is {}", suboptimalServerPartitionTimestamp);
boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds()
> this.suboptimalPartitionForceRefreshIntervalInSeconds;
if (forceRefreshDueToSuboptimalPartitionReplicaSet) {
// CAS-style update: only the caller that observed the old timestamp wins the
// right to force a refresh; others see Instant.MAX and skip it.
Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity,
(key, oldVal) -> {
logger.debug("key = {}, oldValue = {}", key, oldVal);
if (suboptimalServerPartitionTimestamp.equals(oldVal)) {
return Instant.MAX;
} else {
return oldVal;
}
});
logger.debug("newValue is {}", newValue);
if (!suboptimalServerPartitionTimestamp.equals(newValue)) {
logger.debug("setting forceRefreshPartitionAddresses to true");
forceRefreshPartitionAddresses = true;
}
}
}
final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses;
if (forceRefreshPartitionAddressesModified) {
logger.debug("refresh serverPartitionAddressCache for {}", partitionKeyRangeIdentity);
// Mark endpoints that already failed for this request so the refreshed entry
// does not reuse their healthy state.
for (Uri uri : request.requestContext.getFailedEndpoints()) {
uri.setUnhealthy();
}
this.serverPartitionAddressCache.refreshWithInitFunction(
partitionKeyRangeIdentity,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
true,
cachedAddresses));
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
Mono<Utils.ValueHolder<AddressInformation[]>> addressesObs =
this.serverPartitionAddressCache
.getAsyncWithInitFunction(
partitionKeyRangeIdentity,
null,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
false,
cachedAddresses))
.map(Utils.ValueHolder::new);
return addressesObs
.map(
addressesValueHolder -> {
// Record when the replica set is incomplete so a later call can force a refresh.
if (notAllReplicasAvailable(addressesValueHolder.v)) {
if (logger.isDebugEnabled()) {
logger.debug("not all replicas available {}", JavaStreamUtils.info(addressesValueHolder.v));
}
this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now());
}
// Kick off a background refresh when any replica URI reports an unhealthy status;
// the current (possibly stale) addresses are still returned to the caller.
if (Arrays
.stream(addressesValueHolder.v)
.anyMatch(addressInformation -> addressInformation.getPhysicalUri().shouldRefreshHealthStatus())) {
logger.info("refresh cache due to address uri in unhealthy status");
this.serverPartitionAddressCache.refreshWithInitFunction(
partitionKeyRangeIdentity,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
true,
cachedAddresses));
}
return addressesValueHolder;
})
.onErrorResume(ex -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(ex);
CosmosException dce = Utils.as(unwrappedException, CosmosException.class);
if (dce == null) {
logger.error("unexpected failure", ex);
if (forceRefreshPartitionAddressesModified) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
return Mono.error(unwrappedException);
} else {
logger.debug("tryGetAddresses dce", dce);
// NOTFOUND / GONE / range-gone: signal "needs re-resolution" via a null-valued holder.
if (Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.NOTFOUND) ||
Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.GONE) ||
Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
logger.debug("tryGetAddresses: inner onErrorResumeNext return null", dce);
return Mono.just(new Utils.ValueHolder<>(null));
}
return Mono.error(unwrappedException);
}
});
}
/**
 * Replaces the handler used to proactively open connections during cache warm-up
 * and replica address validation. May be called after construction to wire in the
 * transport-level handler once it becomes available.
 */
@Override
public void setOpenConnectionsHandler(IOpenConnectionsHandler openConnectionsHandler) {
this.openConnectionsHandler = openConnectionsHandler;
}
/**
 * Calls the gateway address-resolution endpoint to fetch replica addresses for the given
 * partition key ranges of a collection.
 *
 * Builds the address query (URL, protocol filter, pk range ids), attaches either a
 * master-key/resource token or an AAD authorization header, issues the GET, records
 * address-resolution diagnostics, and maps transport failures to CosmosExceptions with
 * gateway-specific sub-status codes.
 */
public Mono<List<Address>> getServerAddressesViaGatewayAsync(
RxDocumentServiceRequest request,
String collectionRid,
List<String> partitionKeyRangeIds,
boolean forceRefresh) {
if (logger.isDebugEnabled()) {
logger.debug("getServerAddressesViaGatewayAsync collectionRid {}, partitionKeyRangeIds {}", collectionRid,
JavaStreamUtils.toString(partitionKeyRangeIds, ","));
}
request.setAddressRefresh(true, forceRefresh);
String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true);
HashMap<String, String> addressQuery = new HashMap<>();
addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
if (forceRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
}
if (request.forceCollectionRoutingMapRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
}
// The gateway filters returned addresses by transport protocol server-side.
addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds));
headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Non-AAD auth: resolve a key/resource token now. AAD tokens are attached later, asynchronously.
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
String token = null;
try {
token = this.tokenProvider.getUserAuthorizationToken(
collectionRid,
ResourceType.Document,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
request.properties);
} catch (UnauthorizedException e) {
// Fall through to the name-based lookup below; resource-token clients may only
// have a token registered under the collection's alt-link.
if (logger.isDebugEnabled()) {
logger.debug("User doesn't have resource token for collection rid {}", collectionRid);
}
}
if (token == null && request.getIsNameBased()) {
String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress());
token = this.tokenProvider.getUserAuthorizationToken(
collectionAltLink,
ResourceType.Document,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
request.properties);
}
// NOTE(review): token can still be null here (rid-based request whose token lookup
// failed); confirm HttpUtils.urlEncode tolerates null before relying on this path.
token = HttpUtils.urlEncode(token);
headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
}
URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery));
String identifier = logAddressResolutionStart(
request, targetEndpoint, forceRefresh, request.forceCollectionRoutingMapRefresh);
HttpHeaders httpHeaders = new HttpHeaders(headers);
Instant addressCallStartTime = Instant.now();
HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders);
Mono<HttpResponse> httpResponseMono;
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
httpResponseMono = this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
} else {
// AAD: acquire/refresh the bearer token and populate the header before sending.
httpResponseMono = tokenProvider
.populateAuthorizationHeader(httpHeaders)
.flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
}
Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, clientContext, httpResponseMono, httpRequest);
return dsrObs.map(
dsr -> {
// Record the address lookup in the request's metadata diagnostics, when enabled.
MetadataDiagnosticsContext metadataDiagnosticsContext =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (metadataDiagnosticsContext != null) {
Instant addressCallEndTime = Instant.now();
MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
addressCallEndTime,
MetadataType.SERVER_ADDRESS_LOOKUP);
metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
}
if (logger.isDebugEnabled()) {
logger.debug("getServerAddressesViaGatewayAsync deserializes result");
}
logAddressResolutionEnd(request, identifier, null);
return dsr.getQueryResponse(null, Address.class);
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
logAddressResolutionEnd(request, identifier, unwrappedException.toString());
if (!(unwrappedException instanceof Exception)) {
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
// Wrap raw network failures: read timeouts -> REQUEST_TIMEOUT, others -> SERVICE_UNAVAILABLE.
logger.error("Network failure", exception);
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(
request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
// Tag the failure with a gateway-endpoint sub-status so retry policies can distinguish it.
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
}
return Mono.error(dce);
});
}
/**
 * No-op: this cache holds no resources that require explicit release
 * (the HTTP client is owned and closed by the caller).
 */
public void dispose() {
}
/**
 * Resolves master-partition replica addresses, using the single-entry in-memory cache
 * unless a refresh is forced.
 *
 * A refresh is additionally forced when the cached master replica set is incomplete and
 * has been so for longer than the suboptimal-partition refresh interval. On successful
 * refresh the suboptimal timestamp is started (if the new set is still incomplete) or
 * reset to Instant.MAX (the "not suboptimal" sentinel).
 */
private Mono<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map<String, Object> properties) {
logger.debug("resolveMasterAsync forceRefresh: {}", forceRefresh);
// Snapshot the volatile cache reference once so all decisions below are consistent.
Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeInitial = this.masterPartitionAddressCache;
forceRefresh = forceRefresh ||
(masterAddressAndRangeInitial != null &&
notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) &&
Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds);
if (forceRefresh || this.masterPartitionAddressCache == null) {
Mono<List<Address>> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync(
request,
ResourceType.Database,
null,
databaseFeedEntryUrl,
forceRefresh,
false,
properties);
return masterReplicaAddressesObs.map(
masterAddresses -> {
// Master partition uses the empty-string collection rid.
Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeRes =
this.toPartitionAddressAndRange("", masterAddresses);
this.masterPartitionAddressCache = masterAddressAndRangeRes;
if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight())
&& this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
// Replica set still incomplete: start (or keep) the suboptimal clock.
this.suboptimalMasterPartitionTimestamp = Instant.now();
} else {
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
}
return masterPartitionAddressCache;
})
.doOnError(
e -> {
// Failed refresh: clear the suboptimal clock so the next call starts clean.
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
});
} else {
if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight())
&& this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
this.suboptimalMasterPartitionTimestamp = Instant.now();
}
return Mono.just(masterAddressAndRangeInitial);
}
}
/**
 * Decides whether a forced partition-address refresh should be escalated into a
 * collection routing map refresh as well, and records the refresh signals per collection.
 *
 * Escalation happens when repeated address-only refreshes for the same pk range have not
 * been followed by a routing map refresh within the minimum enforcement window (see
 * {@link ForcedRefreshMetadata#shouldIncludeCollectionRoutingMapRefresh}); in that case
 * {@code request.forceCollectionRoutingMapRefresh} is set to true as a side effect.
 */
private void evaluateCollectionRoutingMapRefreshForServerPartition(
RxDocumentServiceRequest request,
PartitionKeyRangeIdentity pkRangeIdentity,
boolean forceRefreshPartitionAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
validatePkRangeIdentity(pkRangeIdentity);
String collectionRid = pkRangeIdentity.getCollectionRid();
String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
if (forceRefreshPartitionAddresses) {
ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
collectionRid,
(colRid) -> new ForcedRefreshMetadata());
if (request.forceCollectionRoutingMapRefresh) {
// Caller already asked for a routing map refresh; record both signals.
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
true);
} else if (forcedRefreshMetadata.shouldIncludeCollectionRoutingMapRefresh(pkRangeIdentity)) {
// Escalate: address-only refreshes alone have not resolved the problem.
request.forceCollectionRoutingMapRefresh = true;
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
true);
} else {
forcedRefreshMetadata.signalPartitionAddressOnlyRefresh(pkRangeIdentity);
}
} else if (request.forceCollectionRoutingMapRefresh) {
// Routing map refresh without an address refresh: record it so future
// address-only refreshes are compared against this timestamp.
ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
collectionRid,
(colRid) -> new ForcedRefreshMetadata());
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
false);
}
logger.debug("evaluateCollectionRoutingMapRefreshForServerPartition collectionRid {}, partitionKeyRangeId {},"
+ " " +
"forceRefreshPartitionAddresses {}, forceCollectionRoutingMapRefresh {}",
collectionRid,
partitionKeyRangeId,
forceRefreshPartitionAddresses,
request.forceCollectionRoutingMapRefresh);
}
// Fails fast when the identity, its collection rid, or its pk range id is null,
// so cache lookups never operate on a partially-populated key.
private void validatePkRangeIdentity(PartitionKeyRangeIdentity pkRangeIdentity) {
    Utils.checkNotNullOrThrow(pkRangeIdentity, "pkRangeId", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getCollectionRid(), "pkRangeId.getCollectionRid()", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getPartitionKeyRangeId(), "pkRangeId.getPartitionKeyRangeId()", "");
}
/**
 * Resolves replica addresses for a single partition key range by calling the gateway
 * address endpoint, then merges the result with previously cached addresses so that
 * endpoints returned again keep their existing health state.
 *
 * @param request         service request driving the resolution (auth + diagnostics).
 * @param pkRangeIdentity fully-populated identity (collection rid + pk range id) to resolve.
 * @param forceRefresh    whether the gateway should bypass its own address cache.
 * @param cachedAddresses previously cached addresses for this range (may be null).
 * @return a Mono emitting the merged address array; errors with
 *         PartitionKeyRangeGoneException when the range no longer exists.
 */
private Mono<AddressInformation[]> getAddressesForRangeId(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefresh,
    AddressInformation[] cachedAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);

    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();

    logger.debug(
        "getAddressesForRangeId collectionRid {}, partitionKeyRangeId {}, forceRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefresh);

    Mono<List<Address>> addressResponse =
        this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh);

    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> addressInfos =
        addressResponse.map(
            addresses -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("addresses from getServerAddressesViaGatewayAsync in getAddressesForRangeId {}",
                        JavaStreamUtils.info(addresses));
                }

                return addresses
                    .stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream()
                    // BUGFIX: pass the per-range group instead of the whole (unfiltered)
                    // response; previously the grouping and the protocol filter were
                    // silently discarded, so addresses of other ranges/protocols could
                    // leak into each pair when more than one group was present.
                    .map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, groupedAddresses))
                    .collect(Collectors.toList());
            });

    // Only a single range id was requested; keep just its pair.
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> result =
        addressInfos
            .map(addressInfo -> addressInfo.stream()
                .filter(a -> StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId))
                .collect(Collectors.toList()));

    return result
        .flatMap(
            list -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("getAddressesForRangeId flatMap got result {}", JavaStreamUtils.info(list));
                }

                if (list.isEmpty()) {
                    // The gateway no longer knows this range: surface as range-gone so the
                    // caller re-resolves the routing map.
                    String errorMessage = String.format(
                        RMResources.PartitionKeyRangeNotFound,
                        partitionKeyRangeId,
                        collectionRid);
                    PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage);
                    BridgeInternal.setResourceAddress(e, collectionRid);
                    return Mono.error(e);
                } else {
                    // Reuse cached AddressInformation objects for endpoints returned again,
                    // preserving their accumulated health status.
                    AddressInformation[] mergedAddresses = this.mergeAddresses(list.get(0).getRight(), cachedAddresses);
                    for (AddressInformation address : mergedAddresses) {
                        address.getPhysicalUri().setRefreshed();
                    }

                    if (this.replicaAddressValidationEnabled) {
                        this.validateReplicaAddresses(mergedAddresses);
                    }

                    return Mono.just(mergedAddresses);
                }
            })
        .doOnError(e -> logger.debug("getAddressesForRangeId", e));
}
/**
 * Calls the gateway address-resolution endpoint to fetch master-partition replica addresses.
 *
 * Mirrors {@link #getServerAddressesViaGatewayAsync} but targets the master resource
 * (e.g. the database feed) instead of a collection's pk ranges, and optionally requests
 * the master collection resolver. Failures are mapped to CosmosExceptions with
 * gateway-specific sub-status codes.
 */
public Mono<List<Address>> getMasterAddressesViaGatewayAsync(
RxDocumentServiceRequest request,
ResourceType resourceType,
String resourceAddress,
String entryUrl,
boolean forceRefresh,
boolean useMasterCollectionResolver,
Map<String, Object> properties) {
logger.debug("getMasterAddressesViaGatewayAsync " +
"resourceType {}, " +
"resourceAddress {}, " +
"entryUrl {}, " +
"forceRefresh {}, " +
"useMasterCollectionResolver {}",
resourceType,
resourceAddress,
entryUrl,
forceRefresh,
useMasterCollectionResolver
);
request.setAddressRefresh(true, forceRefresh);
HashMap<String, String> queryParameters = new HashMap<>();
queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
if (forceRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
}
if (useMasterCollectionResolver) {
headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, "true");
}
if(request.forceCollectionRoutingMapRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
}
// The gateway filters returned addresses by transport protocol server-side.
queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Non-AAD auth: attach a key token now; AAD tokens are populated asynchronously below.
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
String token = this.tokenProvider.getUserAuthorizationToken(
resourceAddress,
resourceType,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
properties);
headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token));
}
URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters));
// NOTE(review): forceRefresh / forceCollectionRoutingMapRefresh are hard-coded to
// true here, unlike getServerAddressesViaGatewayAsync which logs the actual flags;
// confirm whether the diagnostics should reflect the real values instead.
String identifier = logAddressResolutionStart(
request, targetEndpoint, true, true);
HttpHeaders defaultHttpHeaders = new HttpHeaders(headers);
HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), defaultHttpHeaders);
Instant addressCallStartTime = Instant.now();
Mono<HttpResponse> httpResponseMono;
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
httpResponseMono = this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
} else {
httpResponseMono = tokenProvider
.populateAuthorizationHeader(defaultHttpHeaders)
.flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
}
Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, this.clientContext, httpResponseMono, httpRequest);
return dsrObs.map(
dsr -> {
// Record the master address lookup in the request's metadata diagnostics, when enabled.
MetadataDiagnosticsContext metadataDiagnosticsContext =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (metadataDiagnosticsContext != null) {
Instant addressCallEndTime = Instant.now();
MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
addressCallEndTime,
MetadataType.MASTER_ADDRESS_LOOK_UP);
metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
}
logAddressResolutionEnd(request, identifier, null);
return dsr.getQueryResponse(null, Address.class);
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
logAddressResolutionEnd(request, identifier, unwrappedException.toString());
if (!(unwrappedException instanceof Exception)) {
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
// Wrap raw network failures: read timeouts -> REQUEST_TIMEOUT, others -> SERVICE_UNAVAILABLE.
logger.error("Network failure", exception);
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(
request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
// Tag the failure with a gateway-endpoint sub-status so retry policies can distinguish it.
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
}
return Mono.error(dce);
});
}
<!-- body below -->
/*
 * NOTE: this comment documents mergeAddresses(newAddresses, cachedAddresses), which is
 * defined elsewhere in this class — NOT the method that immediately follows. It is
 * deliberately a plain block comment (not Javadoc) so tooling does not attach it to
 * the wrong method.
 *
 * mergeAddresses merges the new addresses returned from the gateway with the cached
 * addresses: if an address is returned from the gateway again, the cached
 * AddressInformation object is kept (preserving its health state); if it is a new
 * address, the new AddressInformation object is used. Parameters: newAddresses — the
 * latest addresses returned from the gateway; cachedAddresses — the previously cached
 * addresses. Returns the merged addresses.
 */
// Fires a background connectivity probe for every replica endpoint whose health status
// is still UnhealthyPending, via the open-connections handler. Endpoints in any other
// state are left untouched; nothing is submitted when there is nothing to validate.
private void validateReplicaAddresses(AddressInformation[] addresses) {
    checkNotNull(addresses, "Argument 'addresses' can not be null");

    List<Uri> pendingValidation = new ArrayList<>();
    for (AddressInformation address : addresses) {
        Uri physicalUri = address.getPhysicalUri();
        if (physicalUri.getHealthStatus() == Uri.HealthStatus.UnhealthyPending) {
            pendingValidation.add(physicalUri);
        }
    }

    if (!pendingValidation.isEmpty()) {
        // Fire-and-forget on the dedicated bounded-elastic scheduler.
        this.openConnectionsHandler
            .openConnections(pendingValidation)
            .subscribeOn(CosmosSchedulers.OPEN_CONNECTIONS_BOUNDED_ELASTIC)
            .subscribe();
    }
}
// Converts a gateway Address list (all entries belonging to one partition key range) into a
// (range identity, AddressInformation[]) pair, and — when TCP endpoint rediscovery is on —
// records each server endpoint in the reverse-lookup map used by updateAddresses.
// NOTE: assumes a non-empty list; the identity is derived from the first entry.
private Pair<PartitionKeyRangeIdentity, AddressInformation[]> toPartitionAddressAndRange(String collectionRid, List<Address> addresses) {
    if (logger.isDebugEnabled()) {
        logger.debug("toPartitionAddressAndRange");
    }

    PartitionKeyRangeIdentity partitionKeyRangeIdentity =
        new PartitionKeyRangeIdentity(collectionRid, addresses.get(0).getParitionKeyRangeId());

    AddressInformation[] addressInfos = new AddressInformation[addresses.size()];
    for (int i = 0; i < addressInfos.length; i++) {
        addressInfos[i] = GatewayAddressCache.toAddressInformation(addresses.get(i));
    }

    if (this.tcpConnectionEndpointRediscoveryEnabled) {
        for (AddressInformation addressInfo : addressInfos) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Added address to serverPartitionAddressToPkRangeIdMap: ({\"partitionKeyRangeIdentity\":{},\"address\":{}})",
                    partitionKeyRangeIdentity,
                    addressInfo);
            }

            this.serverPartitionAddressToPkRangeIdMap.compute(addressInfo.getServerKey(), (serverKey, pkRangeIdentitySet) -> {
                Set<PartitionKeyRangeIdentity> identities =
                    pkRangeIdentitySet == null ? ConcurrentHashMap.newKeySet() : pkRangeIdentitySet;
                identities.add(partitionKeyRangeIdentity);
                return identities;
            });
        }
    }

    return Pair.of(partitionKeyRangeIdentity, addressInfos);
}
// Adapts a gateway Address payload into the transport-layer AddressInformation.
// The leading boolean flag is always true here, exactly as for every gateway-resolved address.
private static AddressInformation toAddressInformation(Address address) {
    boolean isPrimary = address.isPrimary();
    String physicalUri = address.getPhyicalUri();
    String protocolScheme = address.getProtocolScheme();
    return new AddressInformation(true, isPrimary, physicalUri, protocolScheme);
}
/**
 * Warms up the address cache (and, when a handler is configured, the transport connections)
 * for the given pk ranges of a collection.
 *
 * Range ids are resolved in batches of {@code DefaultBatchSize} via the gateway (with retry);
 * each resolved group is written into the server partition address cache, then handed to the
 * open-connections handler. When no handler is set, only the cache is populated.
 */
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(
DocumentCollection collection,
List<PartitionKeyRangeIdentity> partitionKeyRangeIdentities) {
checkNotNull(collection, "Argument 'collection' should not be null");
checkNotNull(partitionKeyRangeIdentities, "Argument 'partitionKeyRangeIdentities' should not be null");
if (logger.isDebugEnabled()) {
logger.debug(
"openConnectionsAndInitCaches collection: {}, partitionKeyRangeIdentities: {}",
collection.getResourceId(),
JavaStreamUtils.toString(partitionKeyRangeIdentities, ","));
}
List<Flux<List<Address>>> tasks = new ArrayList<>();
int batchSize = GatewayAddressCache.DefaultBatchSize;
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this.clientContext,
OperationType.Read,
collection.getResourceId(),
ResourceType.DocumentCollection,
Collections.emptyMap());
// Slice the range ids into batches so each gateway call stays bounded.
for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) {
int endIndex = i + batchSize;
endIndex = Math.min(endIndex, partitionKeyRangeIdentities.size());
tasks.add(
this.getServerAddressesViaGatewayWithRetry(
request,
collection.getResourceId(),
partitionKeyRangeIdentities
.subList(i, endIndex)
.stream()
.map(PartitionKeyRangeIdentity::getPartitionKeyRangeId)
.collect(Collectors.toList()),
false).flux());
}
// Process batches sequentially; within a batch, group by pk range, seed the cache,
// then hand the endpoints to the open-connections handler (if any).
return Flux.concat(tasks)
.flatMap(list -> {
List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> addressInfos =
list.stream()
.filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
.collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
.values()
.stream().map(addresses -> toPartitionAddressAndRange(collection.getResourceId(), addresses))
.collect(Collectors.toList());
return Flux.fromIterable(addressInfos)
.flatMap(addressInfo -> {
this.serverPartitionAddressCache.set(addressInfo.getLeft(), addressInfo.getRight());
if (this.openConnectionsHandler != null) {
return this.openConnectionsHandler.openConnections(
Arrays
.stream(addressInfo.getRight())
.map(addressInformation -> addressInformation.getPhysicalUri())
.collect(Collectors.toList()));
}
logger.info("OpenConnectionHandler is null, can not open connections");
return Flux.empty();
});
});
}
// Wraps getServerAddressesViaGatewayAsync in the warm-up retry policy so that transient
// throttling during openConnectionsAndInitCaches is retried with backoff, honoring the
// client's throttling retry options.
private Mono<List<Address>> getServerAddressesViaGatewayWithRetry(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {

    return BackoffRetryUtility.executeRetry(
        () -> getServerAddressesViaGatewayAsync(request, collectionRid, partitionKeyRangeIds, forceRefresh),
        new OpenConnectionAndInitCachesRetryPolicy(this.connectionPolicy.getThrottlingRetryOptions()));
}
// True when fewer addresses were resolved than the system replication policy's
// maximum replica set size, i.e. at least one replica is missing ("suboptimal").
private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) {
    int resolvedReplicaCount = addressInformations.length;
    return resolvedReplicaCount < ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize;
}
// Records the start of an address-resolution call in the request's diagnostics and
// returns the correlation identifier, or null when diagnostics are disabled for
// this request (callers pass the identifier to logAddressResolutionEnd).
private static String logAddressResolutionStart(
    RxDocumentServiceRequest request,
    URI targetEndpointUrl,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {

    if (request.requestContext.cosmosDiagnostics == null) {
        return null;
    }

    return BridgeInternal.recordAddressResolutionStart(
        request.requestContext.cosmosDiagnostics,
        targetEndpointUrl,
        forceRefresh,
        forceCollectionRoutingMapRefresh);
}
// Closes out the address-resolution diagnostics entry started by logAddressResolutionStart.
// No-op when diagnostics capture is disabled for this request.
private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier, String errorMessage) {
    if (request.requestContext.cosmosDiagnostics == null) {
        return;
    }
    BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosDiagnostics, identifier, errorMessage);
}
// Per-collection bookkeeping that decides when repeated address-only forced refreshes
// should be escalated into a collection routing map refresh.
private static class ForcedRefreshMetadata {
    // When an address-only forced refresh last happened, per partition key range.
    private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> lastPartitionAddressOnlyRefresh;
    // When the collection routing map was last force-refreshed.
    private Instant lastCollectionRoutingMapRefresh;

    public ForcedRefreshMetadata() {
        this.lastPartitionAddressOnlyRefresh = new ConcurrentHashMap<>();
        this.lastCollectionRoutingMapRefresh = Instant.now();
    }

    // Records a routing map refresh; optionally also records an address refresh
    // for the given range at the same timestamp.
    public void signalCollectionRoutingMapRefresh(
        PartitionKeyRangeIdentity pk,
        boolean forcePartitionAddressRefresh) {
        Instant now = Instant.now();
        if (forcePartitionAddressRefresh) {
            this.lastPartitionAddressOnlyRefresh.put(pk, now);
        }
        this.lastCollectionRoutingMapRefresh = now;
    }

    // Records an address-only forced refresh for the given range.
    public void signalPartitionAddressOnlyRefresh(PartitionKeyRangeIdentity pk) {
        this.lastPartitionAddressOnlyRefresh.put(pk, Instant.now());
    }

    // Escalate only when (a) this range's last address-only refresh happened strictly
    // after the last routing map refresh (so address refreshes alone did not help) and
    // (b) the minimum enforcement window since that routing map refresh has elapsed.
    public boolean shouldIncludeCollectionRoutingMapRefresh(PartitionKeyRangeIdentity pk) {
        Instant addressRefreshSnapshot = this.lastPartitionAddressOnlyRefresh.get(pk);
        Instant routingMapRefreshSnapshot = this.lastCollectionRoutingMapRefresh;

        if (addressRefreshSnapshot == null || !addressRefreshSnapshot.isAfter(routingMapRefreshSnapshot)) {
            return false;
        }

        Duration sinceLastRoutingMapRefresh = Duration.between(routingMapRefreshSnapshot, Instant.now());
        return sinceLastRoutingMapRefresh.compareTo(minDurationBeforeEnforcingCollectionRoutingMapRefresh) >= 0;
    }
}
} | class GatewayAddressCache implements IAddressCache {
private final static Duration minDurationBeforeEnforcingCollectionRoutingMapRefresh = Duration.ofSeconds(30);
private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class);
private final static String protocolFilterFormat = "%s eq %s";
private final static int DefaultBatchSize = 50;
private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600;
private final DiagnosticsClientContext clientContext;
private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true);
private final URI addressEndpoint;
private final AsyncCacheNonBlocking<PartitionKeyRangeIdentity, AddressInformation[]> serverPartitionAddressCache;
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> suboptimalServerPartitionTimestamps;
private final long suboptimalPartitionForceRefreshIntervalInSeconds;
private final String protocolScheme;
private final String protocolFilter;
private final IAuthorizationTokenProvider tokenProvider;
private final HashMap<String, String> defaultRequestHeaders;
private final HttpClient httpClient;
private volatile Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterPartitionAddressCache;
private volatile Instant suboptimalMasterPartitionTimestamp;
private final ConcurrentHashMap<URI, Set<PartitionKeyRangeIdentity>> serverPartitionAddressToPkRangeIdMap;
private final boolean tcpConnectionEndpointRediscoveryEnabled;
private final ConcurrentHashMap<String, ForcedRefreshMetadata> lastForcedRefreshMap;
private final GlobalEndpointManager globalEndpointManager;
private IOpenConnectionsHandler openConnectionsHandler;
private final ConnectionPolicy connectionPolicy;
private final boolean replicaAddressValidationEnabled;
/**
 * Full constructor. Derives the gateway address endpoint from the service endpoint,
 * initializes the per-partition caches and bookkeeping maps, and builds the default
 * headers (user agent, api type, version) attached to every address call.
 *
 * @throws IllegalStateException when the service endpoint cannot be turned into a
 *         valid address endpoint URI (indicates invalid configuration).
 */
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
long suboptimalPartitionForceRefreshIntervalInSeconds,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
this.clientContext = clientContext;
try {
// e.g. https://account.documents.azure.com/addresses/
this.addressEndpoint = new URL(serviceEndpoint.toURL(), Paths.ADDRESS_PATH_SEGMENT).toURI();
} catch (MalformedURLException | URISyntaxException e) {
logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e);
assert false;
throw new IllegalStateException(e);
}
this.tokenProvider = tokenProvider;
this.serverPartitionAddressCache = new AsyncCacheNonBlocking<>();
this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>();
// Instant.MAX is the "not currently suboptimal" sentinel.
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds;
this.protocolScheme = protocol.scheme();
this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat,
Constants.Properties.PROTOCOL,
this.protocolScheme);
this.httpClient = httpClient;
if (userAgent == null) {
userAgent = new UserAgentContainer();
}
defaultRequestHeaders = new HashMap<>();
defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent());
if(apiType != null) {
defaultRequestHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
this.serverPartitionAddressToPkRangeIdMap = new ConcurrentHashMap<>();
this.tcpConnectionEndpointRediscoveryEnabled = tcpConnectionEndpointRediscoveryEnabled;
this.lastForcedRefreshMap = new ConcurrentHashMap<>();
this.globalEndpointManager = globalEndpointManager;
this.openConnectionsHandler = openConnectionsHandler;
this.connectionPolicy = connectionPolicy;
this.replicaAddressValidationEnabled = Configs.isReplicaAddressValidationEnabled();
}
    /**
     * Convenience constructor that delegates to the full constructor, using the default
     * suboptimal-partition force-refresh interval
     * ({@code DefaultSuboptimalPartitionForceRefreshIntervalInSeconds}).
     */
    public GatewayAddressCache(
        DiagnosticsClientContext clientContext,
        URI serviceEndpoint,
        Protocol protocol,
        IAuthorizationTokenProvider tokenProvider,
        UserAgentContainer userAgent,
        HttpClient httpClient,
        boolean tcpConnectionEndpointRediscoveryEnabled,
        ApiType apiType,
        GlobalEndpointManager globalEndpointManager,
        ConnectionPolicy connectionPolicy,
        IOpenConnectionsHandler openConnectionsHandler) {
        this(clientContext,
            serviceEndpoint,
            protocol,
            tokenProvider,
            userAgent,
            httpClient,
            DefaultSuboptimalPartitionForceRefreshIntervalInSeconds,
            tcpConnectionEndpointRediscoveryEnabled,
            apiType,
            globalEndpointManager,
            connectionPolicy,
            openConnectionsHandler);
    }
@Override
public int updateAddresses(final URI serverKey) {
Objects.requireNonNull(serverKey, "expected non-null serverKey");
AtomicInteger updatedCacheEntryCount = new AtomicInteger(0);
if (this.tcpConnectionEndpointRediscoveryEnabled) {
this.serverPartitionAddressToPkRangeIdMap.computeIfPresent(serverKey, (uri, partitionKeyRangeIdentitySet) -> {
for (PartitionKeyRangeIdentity partitionKeyRangeIdentity : partitionKeyRangeIdentitySet) {
if (partitionKeyRangeIdentity.getPartitionKeyRangeId().equals(PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
this.masterPartitionAddressCache = null;
} else {
this.serverPartitionAddressCache.remove(partitionKeyRangeIdentity);
}
updatedCacheEntryCount.incrementAndGet();
}
return null;
});
} else {
logger.warn("tcpConnectionEndpointRediscovery is not enabled, should not reach here.");
}
return updatedCacheEntryCount.get();
}
    /**
     * Resolves replica addresses for the given partition key range. Master-partition requests
     * are routed to {@link #resolveMasterAsync}; server-partition requests are served from the
     * async address cache, refreshing from the gateway when forced, when the cached replica
     * set has been suboptimal for longer than the configured interval, or when cached
     * endpoints require a health-status refresh.
     *
     * @param request the service request driving the resolution (also carries diagnostics).
     * @param partitionKeyRangeIdentity identifies the collection and partition key range.
     * @param forceRefreshPartitionAddresses when true, bypass the cache and refresh from gateway.
     * @return a Mono emitting the addresses wrapped in a ValueHolder; the holder's value is
     *         null when the range is gone/not found (callers treat that as a cache miss).
     */
    @Override
    public Mono<Utils.ValueHolder<AddressInformation[]>> tryGetAddresses(RxDocumentServiceRequest request,
                                                                         PartitionKeyRangeIdentity partitionKeyRangeIdentity,
                                                                         boolean forceRefreshPartitionAddresses) {

        Utils.checkNotNullOrThrow(request, "request", "");
        Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", "");

        logger.debug("PartitionKeyRangeIdentity {}, forceRefreshPartitionAddresses {}",
            partitionKeyRangeIdentity,
            forceRefreshPartitionAddresses);

        // Master partition is resolved through a dedicated path/cache.
        if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(),
            PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {

            return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties)
                .map(partitionKeyRangeIdentityPair -> new Utils.ValueHolder<>(partitionKeyRangeIdentityPair.getRight()));
        }

        // May set request.forceCollectionRoutingMapRefresh based on forced-refresh history.
        evaluateCollectionRoutingMapRefreshForServerPartition(
            request, partitionKeyRangeIdentity, forceRefreshPartitionAddresses);

        // If this range's replica set has been suboptimal (fewer replicas than expected) for
        // longer than the refresh interval, escalate to a forced refresh exactly once
        // (the CAS-style computeIfPresent guards against concurrent escalation).
        Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity);

        if (suboptimalServerPartitionTimestamp != null) {
            logger.debug("suboptimalServerPartitionTimestamp is {}", suboptimalServerPartitionTimestamp);
            boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds()
                > this.suboptimalPartitionForceRefreshIntervalInSeconds;

            if (forceRefreshDueToSuboptimalPartitionReplicaSet) {
                // Only the thread that observes the unchanged timestamp wins and sets Instant.MAX.
                Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity,
                    (key, oldVal) -> {
                        logger.debug("key = {}, oldValue = {}", key, oldVal);
                        if (suboptimalServerPartitionTimestamp.equals(oldVal)) {
                            return Instant.MAX;
                        } else {
                            return oldVal;
                        }
                    });
                logger.debug("newValue is {}", newValue);

                if (!suboptimalServerPartitionTimestamp.equals(newValue)) {
                    logger.debug("setting forceRefreshPartitionAddresses to true");
                    forceRefreshPartitionAddresses = true;
                }
            }
        }

        // Effectively-final copy for use in lambdas below.
        final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses;

        if (forceRefreshPartitionAddressesModified) {
            this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
        }

        Mono<Utils.ValueHolder<AddressInformation[]>> addressesObs =
            this.serverPartitionAddressCache
                .getAsync(
                    partitionKeyRangeIdentity,
                    cachedAddresses -> this.getAddressesForRangeId(
                        request,
                        partitionKeyRangeIdentity,
                        forceRefreshPartitionAddressesModified,
                        cachedAddresses),
                    cachedAddresses -> {
                        // Refresh predicate: mark endpoints that failed for this request as
                        // unhealthy, then refresh when forced or when any cached endpoint's
                        // health status needs re-validation.
                        for (Uri failedEndpoints : request.requestContext.getFailedEndpoints()) {
                            failedEndpoints.setUnhealthy();
                        }

                        return forceRefreshPartitionAddressesModified
                            || Arrays.stream(cachedAddresses).anyMatch(addressInformation -> addressInformation.getPhysicalUri().shouldRefreshHealthStatus());
                    })
                .map(Utils.ValueHolder::new);

        return addressesObs
            .map(addressesValueHolder -> {
                // Record the first time we observe a short replica set for this range.
                if (notAllReplicasAvailable(addressesValueHolder.v)) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("not all replicas available {}", JavaStreamUtils.info(addressesValueHolder.v));
                    }
                    this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now());
                }

                return addressesValueHolder;
            })
            .onErrorResume(ex -> {
                Throwable unwrappedException = reactor.core.Exceptions.unwrap(ex);
                CosmosException dce = Utils.as(unwrappedException, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure", ex);
                    if (forceRefreshPartitionAddressesModified) {
                        this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
                    }
                    return Mono.error(unwrappedException);
                } else {
                    logger.debug("tryGetAddresses dce", dce);
                    // NotFound/Gone/PKRangeGone: clear suboptimal tracking and surface a
                    // null-valued holder so the caller treats it as "range no longer exists".
                    if (Exceptions.isNotFound(dce) ||
                        Exceptions.isGone(dce) ||
                        Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) {
                        this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);

                        logger.debug("tryGetAddresses: inner onErrorResumeNext return null", dce);
                        return Mono.just(new Utils.ValueHolder<>(null));
                    }
                    return Mono.error(unwrappedException);
                }
            });
    }
    /**
     * Replaces the handler used to proactively open/validate RNTBD connections.
     * Note: not synchronized; concurrent readers may briefly observe the old handler.
     */
    @Override
    public void setOpenConnectionsHandler(IOpenConnectionsHandler openConnectionsHandler) {
        this.openConnectionsHandler = openConnectionsHandler;
    }
    /**
     * Fetches replica addresses for one or more server partition key ranges from the gateway's
     * address endpoint. Builds the address query (URL, protocol filter, pk-range ids), applies
     * authorization (master-key token inline, or AAD token asynchronously), issues the HTTP GET,
     * records metadata diagnostics, and maps transport failures to CosmosExceptions with
     * gateway-specific sub-status codes.
     *
     * @param request the originating service request (used for auth context and diagnostics).
     * @param collectionRid the collection resource id whose ranges are being resolved.
     * @param partitionKeyRangeIds the partition key range ids to resolve addresses for.
     * @param forceRefresh when true, asks the gateway to bypass its own address cache.
     * @return a Mono emitting the list of Address entries returned by the gateway.
     */
    public Mono<List<Address>> getServerAddressesViaGatewayAsync(
        RxDocumentServiceRequest request,
        String collectionRid,
        List<String> partitionKeyRangeIds,
        boolean forceRefresh) {
        if (logger.isDebugEnabled()) {
            logger.debug("getServerAddressesViaGatewayAsync collectionRid {}, partitionKeyRangeIds {}", collectionRid,
                JavaStreamUtils.toString(partitionKeyRangeIds, ","));
        }

        request.setAddressRefresh(true, forceRefresh);
        String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true);
        HashMap<String, String> addressQuery = new HashMap<>();

        addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));

        HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
        if (forceRefresh) {
            headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
        }

        if (request.forceCollectionRoutingMapRefresh) {
            headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
        }

        addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));

        addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds));
        headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());

        // Master-key auth is computed synchronously here; AAD auth is applied later via
        // populateAuthorizationHeader before the request is sent.
        if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
            String token = null;
            try {
                token = this.tokenProvider.getUserAuthorizationToken(
                    collectionRid,
                    ResourceType.Document,
                    RequestVerb.GET,
                    headers,
                    AuthorizationTokenType.PrimaryMasterKey,
                    request.properties);
            } catch (UnauthorizedException e) {
                // Deliberately swallowed: a resource-token client may not have a token for the
                // collection rid; fall back to the name-based alt-link below.
                if (logger.isDebugEnabled()) {
                    logger.debug("User doesn't have resource token for collection rid {}", collectionRid);
                }
            }

            if (token == null && request.getIsNameBased()) {
                // Fallback: resolve the token via the collection's name-based alt-link.
                String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress());
                token = this.tokenProvider.getUserAuthorizationToken(
                    collectionAltLink,
                    ResourceType.Document,
                    RequestVerb.GET,
                    headers,
                    AuthorizationTokenType.PrimaryMasterKey,
                    request.properties);
            }

            token = HttpUtils.urlEncode(token);
            headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
        }

        URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery));
        String identifier = logAddressResolutionStart(
            request, targetEndpoint, forceRefresh, request.forceCollectionRoutingMapRefresh);

        HttpHeaders httpHeaders = new HttpHeaders(headers);

        Instant addressCallStartTime = Instant.now();
        HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders);

        Mono<HttpResponse> httpResponseMono;
        if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
            httpResponseMono = this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
        } else {
            httpResponseMono = tokenProvider
                .populateAuthorizationHeader(httpHeaders)
                .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                    Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
        }

        Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, clientContext, httpResponseMono, httpRequest);
        return dsrObs.map(
            dsr -> {
                // Record the address-lookup latency into the request diagnostics, if present.
                MetadataDiagnosticsContext metadataDiagnosticsContext =
                    BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
                if (metadataDiagnosticsContext != null) {
                    Instant addressCallEndTime = Instant.now();
                    MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                        addressCallEndTime,
                        MetadataType.SERVER_ADDRESS_LOOKUP);
                    metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
                }

                if (logger.isDebugEnabled()) {
                    logger.debug("getServerAddressesViaGatewayAsync deserializes result");
                }
                logAddressResolutionEnd(request, identifier, null);
                return dsr.getQueryResponse(null, Address.class);
            }).onErrorResume(throwable -> {
                Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
                logAddressResolutionEnd(request, identifier, unwrappedException.toString());

                if (!(unwrappedException instanceof Exception)) {
                    // Errors (e.g. OutOfMemoryError) are propagated untouched.
                    logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                    return Mono.error(unwrappedException);
                }

                Exception exception = (Exception) unwrappedException;
                CosmosException dce;
                if (!(exception instanceof CosmosException)) {
                    // Wrap raw network failures: read timeouts map to 408, other network
                    // failures to 503, anything else keeps status code 0.
                    logger.error("Network failure", exception);
                    int statusCode = 0;
                    if (WebExceptionUtility.isNetworkFailure(exception)) {
                        if (WebExceptionUtility.isReadTimeoutException(exception)) {
                            statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                        } else {
                            statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                        }
                    }

                    dce = BridgeInternal.createCosmosException(
                        request.requestContext.resourcePhysicalAddress, statusCode, exception);
                    BridgeInternal.setRequestHeaders(dce, request.getHeaders());
                } else {
                    dce = (CosmosException) exception;
                }

                if (WebExceptionUtility.isNetworkFailure(dce)) {
                    // Tag the failure so retry policies can distinguish gateway read timeouts
                    // from gateway unavailability.
                    if (WebExceptionUtility.isReadTimeoutException(dce)) {
                        BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                    } else {
                        BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                    }
                }

                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
                }

                return Mono.error(dce);
            });
    }
    /** No-op: this cache holds no resources that require explicit disposal. */
    public void dispose() {
    }
    /**
     * Resolves addresses for the master partition, caching the result in
     * {@code masterPartitionAddressCache}. Forces a gateway refresh when requested, or when the
     * cached master replica set has been suboptimal for longer than the configured interval.
     * Also maintains {@code suboptimalMasterPartitionTimestamp} (Instant.MAX means "not
     * currently suboptimal").
     *
     * @param request the originating service request.
     * @param forceRefresh whether the caller demands a refresh from the gateway.
     * @param properties request properties forwarded for token resolution.
     * @return a Mono emitting the (master pk-range identity, addresses) pair.
     */
    private Mono<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map<String, Object> properties) {
        logger.debug("resolveMasterAsync forceRefresh: {}", forceRefresh);
        Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeInitial = this.masterPartitionAddressCache;

        // Escalate to a forced refresh if the cached replica set is short-handed and the
        // suboptimal state has persisted past the refresh interval.
        forceRefresh = forceRefresh ||
            (masterAddressAndRangeInitial != null &&
                notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) &&
                Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds);

        if (forceRefresh || this.masterPartitionAddressCache == null) {
            Mono<List<Address>> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync(
                request,
                ResourceType.Database,
                null,
                databaseFeedEntryUrl,
                forceRefresh,
                false,
                properties);

            return masterReplicaAddressesObs.map(
                masterAddresses -> {
                    // Master partition uses an empty collection rid ("").
                    Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeRes =
                        this.toPartitionAddressAndRange("", masterAddresses);
                    this.masterPartitionAddressCache = masterAddressAndRangeRes;

                    // Start (or clear) the suboptimal timer based on the fresh result.
                    if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight())
                        && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
                        this.suboptimalMasterPartitionTimestamp = Instant.now();
                    } else {
                        this.suboptimalMasterPartitionTimestamp = Instant.MAX;
                    }

                    return masterPartitionAddressCache;
                })
                .doOnError(
                    e -> {
                        // On failure, stop the suboptimal timer so the next call re-evaluates cleanly.
                        this.suboptimalMasterPartitionTimestamp = Instant.MAX;
                    });
        } else {
            // Serving from cache: start the suboptimal timer if it isn't already running.
            if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight())
                && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
                this.suboptimalMasterPartitionTimestamp = Instant.now();
            }

            return Mono.just(masterAddressAndRangeInitial);
        }
    }
    /**
     * Decides, based on per-collection forced-refresh history, whether a partition-address
     * refresh should also trigger a collection routing map refresh, and records the refresh
     * signals in {@code lastForcedRefreshMap}. May set
     * {@code request.forceCollectionRoutingMapRefresh} as a side effect.
     *
     * @param request the request whose routing-map-refresh flag may be escalated.
     * @param pkRangeIdentity identifies the collection and partition key range.
     * @param forceRefreshPartitionAddresses whether a partition-address refresh was requested.
     */
    private void evaluateCollectionRoutingMapRefreshForServerPartition(
        RxDocumentServiceRequest request,
        PartitionKeyRangeIdentity pkRangeIdentity,
        boolean forceRefreshPartitionAddresses) {

        Utils.checkNotNullOrThrow(request, "request", "");
        validatePkRangeIdentity(pkRangeIdentity);

        String collectionRid = pkRangeIdentity.getCollectionRid();
        String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();

        if (forceRefreshPartitionAddresses) {
            ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
                collectionRid,
                (colRid) -> new ForcedRefreshMetadata());

            if (request.forceCollectionRoutingMapRefresh) {
                // Caller already requested a routing map refresh; just record it.
                forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
                    pkRangeIdentity,
                    true);
            } else if (forcedRefreshMetadata.shouldIncludeCollectionRoutingMapRefresh(pkRangeIdentity)) {
                // Address-only refreshes keep happening without a routing map refresh in
                // between -- escalate this request to also refresh the routing map.
                request.forceCollectionRoutingMapRefresh = true;
                forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
                    pkRangeIdentity,
                    true);
            } else {
                forcedRefreshMetadata.signalPartitionAddressOnlyRefresh(pkRangeIdentity);
            }
        } else if (request.forceCollectionRoutingMapRefresh) {
            // Routing-map-only refresh: record it without touching the per-range address timestamps.
            ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
                collectionRid,
                (colRid) -> new ForcedRefreshMetadata());

            forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
                pkRangeIdentity,
                false);
        }

        logger.debug("evaluateCollectionRoutingMapRefreshForServerPartition collectionRid {}, partitionKeyRangeId {},"
                + " " +
                "forceRefreshPartitionAddresses {}, forceCollectionRoutingMapRefresh {}",
            collectionRid,
            partitionKeyRangeId,
            forceRefreshPartitionAddresses,
            request.forceCollectionRoutingMapRefresh);
    }
    /**
     * Validates that the pk-range identity and both of its components (collection rid and
     * partition key range id) are non-null; throws per the
     * {@code Utils.checkNotNullOrThrow} contract otherwise.
     */
    private void validatePkRangeIdentity(PartitionKeyRangeIdentity pkRangeIdentity) {
        Utils.checkNotNullOrThrow(pkRangeIdentity, "pkRangeId", "");
        Utils.checkNotNullOrThrow(
            pkRangeIdentity.getCollectionRid(),
            "pkRangeId.getCollectionRid()",
            "");
        Utils.checkNotNullOrThrow(
            pkRangeIdentity.getPartitionKeyRangeId(),
            "pkRangeId.getPartitionKeyRangeId()",
            "");
    }
private Mono<AddressInformation[]> getAddressesForRangeId(
RxDocumentServiceRequest request,
PartitionKeyRangeIdentity pkRangeIdentity,
boolean forceRefresh,
AddressInformation[] cachedAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
validatePkRangeIdentity(pkRangeIdentity);
String collectionRid = pkRangeIdentity.getCollectionRid();
String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
logger.debug(
"getAddressesForRangeId collectionRid {}, partitionKeyRangeId {}, forceRefresh {}",
collectionRid,
partitionKeyRangeId,
forceRefresh);
Mono<List<Address>> addressResponse = this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh);
Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> addressInfos =
addressResponse.map(
addresses -> {
if (logger.isDebugEnabled()) {
logger.debug("addresses from getServerAddressesViaGatewayAsync in getAddressesForRangeId {}",
JavaStreamUtils.info(addresses));
}
return addresses
.stream()
.filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
.collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
.values()
.stream()
.map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, addresses))
.collect(Collectors.toList());
});
Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> result =
addressInfos
.map(addressInfo -> addressInfo.stream()
.filter(a -> StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId))
.collect(Collectors.toList()));
return result
.flatMap(
list -> {
if (logger.isDebugEnabled()) {
logger.debug("getAddressesForRangeId flatMap got result {}", JavaStreamUtils.info(list));
}
if (list.isEmpty()) {
String errorMessage = String.format(
RMResources.PartitionKeyRangeNotFound,
partitionKeyRangeId,
collectionRid);
PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage);
BridgeInternal.setResourceAddress(e, collectionRid);
return Mono.error(e);
} else {
AddressInformation[] mergedAddresses = this.mergeAddresses(list.get(0).getRight(), cachedAddresses);
for (AddressInformation address : mergedAddresses) {
address.getPhysicalUri().setRefreshed();
}
if (this.replicaAddressValidationEnabled) {
this.validateReplicaAddresses(mergedAddresses);
}
return Mono.just(mergedAddresses);
}
})
.doOnError(e -> logger.debug("getAddressesForRangeId", e));
}
    /**
     * Fetches the master-partition replica addresses from the gateway's address endpoint.
     * Mirrors {@link #getServerAddressesViaGatewayAsync} but targets the master resource path,
     * supports the master-collection-resolver hint, records MASTER_ADDRESS_LOOK_UP diagnostics,
     * and maps transport failures to CosmosExceptions with gateway sub-status codes.
     *
     * @param request the originating service request.
     * @param resourceType resource type used for token resolution.
     * @param resourceAddress resource address used for token resolution (may be null).
     * @param entryUrl the master entry path to resolve.
     * @param forceRefresh when true, asks the gateway to bypass its own address cache.
     * @param useMasterCollectionResolver whether to set the master-collection-resolver header.
     * @param properties request properties forwarded for token resolution.
     * @return a Mono emitting the list of Address entries returned by the gateway.
     */
    public Mono<List<Address>> getMasterAddressesViaGatewayAsync(
        RxDocumentServiceRequest request,
        ResourceType resourceType,
        String resourceAddress,
        String entryUrl,
        boolean forceRefresh,
        boolean useMasterCollectionResolver,
        Map<String, Object> properties) {

        logger.debug("getMasterAddressesViaGatewayAsync " +
                "resourceType {}, " +
                "resourceAddress {}, " +
                "entryUrl {}, " +
                "forceRefresh {}, " +
                "useMasterCollectionResolver {}",
            resourceType,
            resourceAddress,
            entryUrl,
            forceRefresh,
            useMasterCollectionResolver
        );

        request.setAddressRefresh(true, forceRefresh);
        HashMap<String, String> queryParameters = new HashMap<>();
        queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
        HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);

        if (forceRefresh) {
            headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
        }

        if (useMasterCollectionResolver) {
            headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, "true");
        }

        if(request.forceCollectionRoutingMapRefresh) {
            headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
        }

        queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
        headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());

        // Master-key auth is computed synchronously; AAD auth is applied asynchronously below.
        if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
            String token = this.tokenProvider.getUserAuthorizationToken(
                resourceAddress,
                resourceType,
                RequestVerb.GET,
                headers,
                AuthorizationTokenType.PrimaryMasterKey,
                properties);

            headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token));
        }

        URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters));
        // NOTE(review): both diagnostics flags are hard-coded to true here, unlike the server
        // path which passes the actual forceRefresh/forceCollectionRoutingMapRefresh values --
        // confirm whether this is intentional for master lookups.
        String identifier = logAddressResolutionStart(
            request, targetEndpoint, true, true);

        HttpHeaders defaultHttpHeaders = new HttpHeaders(headers);

        HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), defaultHttpHeaders);
        Instant addressCallStartTime = Instant.now();

        Mono<HttpResponse> httpResponseMono;
        if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
            httpResponseMono = this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
        } else {
            httpResponseMono = tokenProvider
                .populateAuthorizationHeader(defaultHttpHeaders)
                .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                    Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
        }

        Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, this.clientContext, httpResponseMono, httpRequest);

        return dsrObs.map(
            dsr -> {
                // Record master-address-lookup latency into request diagnostics, if present.
                MetadataDiagnosticsContext metadataDiagnosticsContext =
                    BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
                if (metadataDiagnosticsContext != null) {
                    Instant addressCallEndTime = Instant.now();
                    MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                        addressCallEndTime,
                        MetadataType.MASTER_ADDRESS_LOOK_UP);
                    metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
                }

                logAddressResolutionEnd(request, identifier, null);
                return dsr.getQueryResponse(null, Address.class);
            }).onErrorResume(throwable -> {
                Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
                logAddressResolutionEnd(request, identifier, unwrappedException.toString());

                if (!(unwrappedException instanceof Exception)) {
                    // Errors (e.g. OutOfMemoryError) are propagated untouched.
                    logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                    return Mono.error(unwrappedException);
                }

                Exception exception = (Exception) unwrappedException;
                CosmosException dce;
                if (!(exception instanceof CosmosException)) {
                    // Wrap raw network failures: read timeouts map to 408, other network
                    // failures to 503, anything else keeps status code 0.
                    logger.error("Network failure", exception);
                    int statusCode = 0;
                    if (WebExceptionUtility.isNetworkFailure(exception)) {
                        if (WebExceptionUtility.isReadTimeoutException(exception)) {
                            statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                        } else {
                            statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                        }
                    }

                    dce = BridgeInternal.createCosmosException(
                        request.requestContext.resourcePhysicalAddress, statusCode, exception);
                    BridgeInternal.setRequestHeaders(dce, request.getHeaders());
                } else {
                    dce = (CosmosException) exception;
                }

                if (WebExceptionUtility.isNetworkFailure(dce)) {
                    // Tag the failure so retry policies can distinguish gateway read timeouts
                    // from gateway unavailability.
                    if (WebExceptionUtility.isReadTimeoutException(dce)) {
                        BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                    } else {
                        BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                    }
                }

                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
                }

                return Mono.error(dce);
            });
    }
    /*
     * NOTE: this comment documents mergeAddresses(newAddresses, cachedAddresses), which is
     * defined elsewhere in this class -- it does NOT describe the method that immediately
     * follows. Written as a plain block comment (not Javadoc) so tooling does not attach it
     * to the wrong method.
     *
     * mergeAddresses merges the new addresses returned from the gateway with the cached
     * addresses: if an address is returned from the gateway again, the cached
     * AddressInformation object is kept; if it is a new address, the new AddressInformation
     * object is used. The result is the merged address array.
     */
private void validateReplicaAddresses(AddressInformation[] addresses) {
checkNotNull(addresses, "Argument 'addresses' can not be null");
List<Uri> addressesNeedToValidation =
Arrays
.stream(addresses)
.map(address -> address.getPhysicalUri())
.filter(addressUri -> addressUri.getHealthStatus() == Uri.HealthStatus.UnhealthyPending)
.collect(Collectors.toList());
if (addressesNeedToValidation.size() > 0) {
this.openConnectionsHandler
.openConnections(addressesNeedToValidation)
.subscribeOn(CosmosSchedulers.OPEN_CONNECTIONS_BOUNDED_ELASTIC)
.subscribe();
}
}
    /**
     * Converts a list of gateway Address entries (all expected to belong to one partition key
     * range -- the range id is taken from the first element) into a
     * (PartitionKeyRangeIdentity, AddressInformation[]) pair. When TCP endpoint rediscovery is
     * enabled, also records the reverse mapping server-endpoint -&gt; pk-range identities used
     * by {@link #updateAddresses}.
     *
     * @param collectionRid the collection rid ("" for the master partition).
     * @param addresses non-empty list of addresses for a single partition key range.
     */
    private Pair<PartitionKeyRangeIdentity, AddressInformation[]> toPartitionAddressAndRange(String collectionRid, List<Address> addresses) {
        if (logger.isDebugEnabled()) {
            logger.debug("toPartitionAddressAndRange");
        }

        // All entries share the same pk-range id; read it from the first one.
        Address address = addresses.get(0);
        PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity(collectionRid, address.getParitionKeyRangeId());

        AddressInformation[] addressInfos =
            addresses
                .stream()
                .map(addr -> GatewayAddressCache.toAddressInformation(addr))
                .collect(Collectors.toList())
                .toArray(new AddressInformation[addresses.size()]);

        if (this.tcpConnectionEndpointRediscoveryEnabled) {
            for (AddressInformation addressInfo : addressInfos) {
                if (logger.isDebugEnabled()) {
                    logger.debug(
                        "Added address to serverPartitionAddressToPkRangeIdMap: ({\"partitionKeyRangeIdentity\":{},\"address\":{}})",
                        partitionKeyRangeIdentity,
                        addressInfo);
                }

                // Maintain the reverse index so a server endpoint failure can evict all
                // pk-range cache entries it backs.
                this.serverPartitionAddressToPkRangeIdMap.compute(addressInfo.getServerKey(), (serverKey, partitionKeyRangeIdentitySet) -> {
                    if (partitionKeyRangeIdentitySet == null) {
                        partitionKeyRangeIdentitySet = ConcurrentHashMap.newKeySet();
                    }

                    partitionKeyRangeIdentitySet.add(partitionKeyRangeIdentity);
                    return partitionKeyRangeIdentitySet;
                });
            }
        }

        return Pair.of(partitionKeyRangeIdentity, addressInfos);
    }
    /** Adapts a gateway Address entry into an AddressInformation (always marked public=true). */
    private static AddressInformation toAddressInformation(Address address) {
        return new AddressInformation(true, address.isPrimary(), address.getPhyicalUri(), address.getProtocolScheme());
    }
    /**
     * Warms up the address cache for the given collection and proactively opens connections to
     * all of its replicas. Pk-range ids are resolved against the gateway in batches of
     * {@code DefaultBatchSize}; each resolved (range, addresses) pair is written into the
     * server partition address cache and then handed to the open-connections handler.
     *
     * @param collection the collection whose ranges should be warmed; must not be null.
     * @param partitionKeyRangeIdentities the ranges to warm; must not be null.
     * @return a Flux of per-endpoint open-connection responses (empty when no handler is set).
     */
    public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(
        DocumentCollection collection,
        List<PartitionKeyRangeIdentity> partitionKeyRangeIdentities) {

        checkNotNull(collection, "Argument 'collection' should not be null");
        checkNotNull(partitionKeyRangeIdentities, "Argument 'partitionKeyRangeIdentities' should not be null");

        if (logger.isDebugEnabled()) {
            logger.debug(
                "openConnectionsAndInitCaches collection: {}, partitionKeyRangeIdentities: {}",
                collection.getResourceId(),
                JavaStreamUtils.toString(partitionKeyRangeIdentities, ","));
        }

        List<Flux<List<Address>>> tasks = new ArrayList<>();
        int batchSize = GatewayAddressCache.DefaultBatchSize;

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this.clientContext,
            OperationType.Read,
            collection.getResourceId(),
            ResourceType.DocumentCollection,
            Collections.emptyMap());

        // Resolve addresses in batches so a collection with many ranges does not produce one
        // oversized gateway request.
        for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) {

            int endIndex = i + batchSize;
            endIndex = Math.min(endIndex, partitionKeyRangeIdentities.size());

            tasks.add(
                this.getServerAddressesViaGatewayWithRetry(
                    request,
                    collection.getResourceId(),
                    partitionKeyRangeIdentities
                        .subList(i, endIndex)
                        .stream()
                        .map(PartitionKeyRangeIdentity::getPartitionKeyRangeId)
                        .collect(Collectors.toList()),
                    false).flux());
        }

        return Flux.concat(tasks)
            .flatMap(list -> {
                // Group the flat address list by pk-range id, convert each group to a
                // (range identity, addresses) pair, seed the cache, then open connections.
                List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> addressInfos =
                    list.stream()
                        .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                        .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                        .values()
                        .stream().map(addresses -> toPartitionAddressAndRange(collection.getResourceId(), addresses))
                        .collect(Collectors.toList());

                return Flux.fromIterable(addressInfos)
                    .flatMap(addressInfo -> {
                        this.serverPartitionAddressCache.set(addressInfo.getLeft(), addressInfo.getRight());

                        if (this.openConnectionsHandler != null) {
                            return this.openConnectionsHandler.openConnections(
                                Arrays
                                    .stream(addressInfo.getRight())
                                    .map(addressInformation -> addressInformation.getPhysicalUri())
                                    .collect(Collectors.toList()));
                        }

                        logger.info("OpenConnectionHandler is null, can not open connections");
                        return Flux.empty();
                    });
            });
    }
private Mono<List<Address>> getServerAddressesViaGatewayWithRetry(
RxDocumentServiceRequest request,
String collectionRid,
List<String> partitionKeyRangeIds,
boolean forceRefresh) {
OpenConnectionAndInitCachesRetryPolicy openConnectionAndInitCachesRetryPolicy =
new OpenConnectionAndInitCachesRetryPolicy(this.connectionPolicy.getThrottlingRetryOptions());
return BackoffRetryUtility.executeRetry(
() -> this.getServerAddressesViaGatewayAsync(request, collectionRid, partitionKeyRangeIds, forceRefresh),
openConnectionAndInitCachesRetryPolicy);
}
    /**
     * Returns true when fewer replicas were resolved than the system replication policy's
     * maximum replica set size -- i.e. the replica set is considered suboptimal.
     */
    private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) {
        return addressInformations.length < ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize;
    }
private static String logAddressResolutionStart(
RxDocumentServiceRequest request,
URI targetEndpointUrl,
boolean forceRefresh,
boolean forceCollectionRoutingMapRefresh) {
if (request.requestContext.cosmosDiagnostics != null) {
return BridgeInternal.recordAddressResolutionStart(
request.requestContext.cosmosDiagnostics,
targetEndpointUrl,
forceRefresh,
forceCollectionRoutingMapRefresh);
}
return null;
}
    /**
     * Records the end of an address resolution (matched by the identifier returned from
     * logAddressResolutionStart) in the request's diagnostics, if present.
     * errorMessage is null on success.
     */
    private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier, String errorMessage) {
        if (request.requestContext.cosmosDiagnostics != null) {
            BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosDiagnostics, identifier, errorMessage);
        }
    }
    /**
     * Per-collection bookkeeping of forced refreshes, used to decide when repeated
     * address-only refreshes should be escalated to a collection routing map refresh.
     */
    private static class ForcedRefreshMetadata {
        // Last address-only forced refresh per partition key range.
        private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> lastPartitionAddressOnlyRefresh;
        // Last time a collection routing map refresh was forced for this collection.
        private Instant lastCollectionRoutingMapRefresh;

        public ForcedRefreshMetadata() {
            lastPartitionAddressOnlyRefresh = new ConcurrentHashMap<>();
            lastCollectionRoutingMapRefresh = Instant.now();
        }

        /**
         * Records a forced collection routing map refresh; when forcePartitionAddressRefresh
         * is true, the pk-range's address refresh timestamp is updated as well.
         */
        public void signalCollectionRoutingMapRefresh(
            PartitionKeyRangeIdentity pk,
            boolean forcePartitionAddressRefresh) {

            Instant nowSnapshot = Instant.now();
            if (forcePartitionAddressRefresh) {
                lastPartitionAddressOnlyRefresh.put(pk, nowSnapshot);
            }

            lastCollectionRoutingMapRefresh = nowSnapshot;
        }

        /** Records an address-only forced refresh for the given pk-range. */
        public void signalPartitionAddressOnlyRefresh(PartitionKeyRangeIdentity pk) {
            lastPartitionAddressOnlyRefresh.put(pk, Instant.now());
        }

        /**
         * Returns true when the pk-range has had an address-only refresh AFTER the last
         * routing map refresh, and the routing map refresh is older than the minimum
         * enforcement interval -- i.e. it is time to escalate to a routing map refresh.
         */
        public boolean shouldIncludeCollectionRoutingMapRefresh(PartitionKeyRangeIdentity pk) {
            Instant lastPartitionAddressRefreshSnapshot = lastPartitionAddressOnlyRefresh.get(pk);
            Instant lastCollectionRoutingMapRefreshSnapshot = lastCollectionRoutingMapRefresh;

            if (lastPartitionAddressRefreshSnapshot == null ||
                !lastPartitionAddressRefreshSnapshot.isAfter(lastCollectionRoutingMapRefreshSnapshot)) {
                return false;
            }

            Duration durationSinceLastForcedCollectionRoutingMapRefresh =
                Duration.between(lastCollectionRoutingMapRefreshSnapshot, Instant.now());

            boolean returnValue = durationSinceLastForcedCollectionRoutingMapRefresh
                .compareTo(minDurationBeforeEnforcingCollectionRoutingMapRefresh) >= 0;

            return returnValue;
        }
    }
} |
@xinlian12 I looked at the openConnections code in `RntbdOpenConnectionsHandler` class, and I think it can cause thread starvation. Let's discuss it offline since that class is not touched as part of this PR. | private void validateReplicaAddresses(AddressInformation[] addresses) {
checkNotNull(addresses, "Argument 'addresses' can not be null");
List<Uri> addressesNeedToValidation =
Arrays
.stream(addresses)
.map(address -> address.getPhysicalUri())
.filter(addressUri -> addressUri.getHealthStatus() == Uri.HealthStatus.UnhealthyPending)
.collect(Collectors.toList());
if (addressesNeedToValidation.size() > 0) {
this.openConnectionsHandler
.openConnections(addressesNeedToValidation)
.subscribeOn(CosmosSchedulers.OPEN_CONNECTIONS_BOUNDED_ELASTIC)
.subscribe();
}
} | .openConnections(addressesNeedToValidation) | private void validateReplicaAddresses(AddressInformation[] addresses) {
checkNotNull(addresses, "Argument 'addresses' can not be null");
List<Uri> addressesNeedToValidation =
Arrays
.stream(addresses)
.map(address -> address.getPhysicalUri())
.filter(addressUri -> addressUri.getHealthStatus() == Uri.HealthStatus.UnhealthyPending)
.collect(Collectors.toList());
if (addressesNeedToValidation.size() > 0) {
this.openConnectionsHandler
.openConnections(addressesNeedToValidation)
.subscribeOn(CosmosSchedulers.OPEN_CONNECTIONS_BOUNDED_ELASTIC)
.subscribe();
}
} | class GatewayAddressCache implements IAddressCache {
// Minimum time between two forced collection routing map refreshes for the same collection.
private final static Duration minDurationBeforeEnforcingCollectionRoutingMapRefresh = Duration.ofSeconds(30);
private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class);
// Format for the gateway address query filter, e.g. "protocol eq rntbd".
private final static String protocolFilterFormat = "%s eq %s";
// Max number of partition key range ids requested per gateway call during cache warm-up.
private final static int DefaultBatchSize = 50;
// Default interval after which a partition with fewer replicas than expected is force-refreshed.
private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600;
private final DiagnosticsClientContext clientContext;
private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true);
// Gateway endpoint used for all address lookups (derived from the service endpoint).
private final URI addressEndpoint;
// Cache of replica addresses per server partition (master partition is cached separately below).
private final AsyncCache<PartitionKeyRangeIdentity, AddressInformation[]> serverPartitionAddressCache;
// When a partition was first observed with an incomplete replica set; Instant.MAX means "refresh scheduled".
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> suboptimalServerPartitionTimestamps;
private final long suboptimalPartitionForceRefreshIntervalInSeconds;
// Transport protocol scheme (e.g. "rntbd") and the derived gateway query filter.
private final String protocolScheme;
private final String protocolFilter;
private final IAuthorizationTokenProvider tokenProvider;
// Headers attached to every address request (user agent, version, optional api type).
private final HashMap<String, String> defaultRequestHeaders;
private final HttpClient httpClient;
// Master-partition addresses are kept in a dedicated slot, not in serverPartitionAddressCache.
private volatile Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterPartitionAddressCache;
private volatile Instant suboptimalMasterPartitionTimestamp;
// Reverse index: physical server URI -> pk ranges whose addresses reference it
// (maintained only when TCP endpoint rediscovery is enabled).
private final ConcurrentHashMap<URI, Set<PartitionKeyRangeIdentity>> serverPartitionAddressToPkRangeIdMap;
private final boolean tcpConnectionEndpointRediscoveryEnabled;
// Per-collection bookkeeping used to decide when to escalate to a routing map refresh.
private final ConcurrentHashMap<String, ForcedRefreshMetadata> lastForcedRefreshMap;
private final GlobalEndpointManager globalEndpointManager;
// Mutable: may be (re)assigned later via setOpenConnectionsHandler.
private IOpenConnectionsHandler openConnectionsHandler;
private final ConnectionPolicy connectionPolicy;
private final boolean replicaAddressValidationEnabled;
/**
 * Creates a gateway-backed address cache that resolves and caches replica addresses for the
 * partitions of a Cosmos DB account.
 *
 * @param clientContext diagnostics client context attached to address lookup requests.
 * @param serviceEndpoint account endpoint; the address endpoint is derived from it.
 * @param protocol transport protocol whose addresses are requested (drives scheme + query filter).
 * @param tokenProvider authorizes address lookup requests.
 * @param userAgent user agent propagated on address requests; a default is created when null.
 * @param httpClient client used to call the gateway address endpoint.
 * @param suboptimalPartitionForceRefreshIntervalInSeconds how long a partition may stay with an
 *        incomplete replica set before a forced refresh is triggered.
 * @param tcpConnectionEndpointRediscoveryEnabled whether the server-URI-to-pkrange reverse index
 *        is maintained for endpoint rediscovery.
 * @param apiType optional API type header value; omitted when null.
 * @param globalEndpointManager used when recording gateway diagnostics on failures.
 * @param connectionPolicy source of throttling retry options for warm-up calls.
 * @param openConnectionsHandler handler used to proactively open connections; may be replaced later.
 * @throws IllegalStateException if the address endpoint cannot be derived from serviceEndpoint.
 */
public GatewayAddressCache(
    DiagnosticsClientContext clientContext,
    URI serviceEndpoint,
    Protocol protocol,
    IAuthorizationTokenProvider tokenProvider,
    UserAgentContainer userAgent,
    HttpClient httpClient,
    long suboptimalPartitionForceRefreshIntervalInSeconds,
    boolean tcpConnectionEndpointRediscoveryEnabled,
    ApiType apiType,
    GlobalEndpointManager globalEndpointManager,
    ConnectionPolicy connectionPolicy,
    IOpenConnectionsHandler openConnectionsHandler) {
    this.clientContext = clientContext;
    try {
        // Append the address path segment to the account endpoint to get the lookup endpoint.
        this.addressEndpoint = new URL(serviceEndpoint.toURL(), Paths.ADDRESS_PATH_SEGMENT).toURI();
    } catch (MalformedURLException | URISyntaxException e) {
        logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e);
        assert false;
        throw new IllegalStateException(e);
    }
    this.tokenProvider = tokenProvider;
    this.serverPartitionAddressCache = new AsyncCache<>();
    this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>();
    // Instant.MAX acts as the "no suboptimal master partition observed" sentinel.
    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
    this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds;
    this.protocolScheme = protocol.scheme();
    // e.g. "protocol eq rntbd" — restricts the gateway response to this transport's addresses.
    this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat,
        Constants.Properties.PROTOCOL,
        this.protocolScheme);
    this.httpClient = httpClient;
    if (userAgent == null) {
        userAgent = new UserAgentContainer();
    }
    defaultRequestHeaders = new HashMap<>();
    defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent());
    if(apiType != null) {
        defaultRequestHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
    }
    defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
    this.serverPartitionAddressToPkRangeIdMap = new ConcurrentHashMap<>();
    this.tcpConnectionEndpointRediscoveryEnabled = tcpConnectionEndpointRediscoveryEnabled;
    this.lastForcedRefreshMap = new ConcurrentHashMap<>();
    this.globalEndpointManager = globalEndpointManager;
    this.openConnectionsHandler = openConnectionsHandler;
    this.connectionPolicy = connectionPolicy;
    this.replicaAddressValidationEnabled = Configs.isReplicaAddressValidationEnabled();
}
/**
 * Convenience constructor that uses the default suboptimal-partition force-refresh interval
 * ({@link #DefaultSuboptimalPartitionForceRefreshIntervalInSeconds}); see the primary
 * constructor for parameter documentation.
 */
public GatewayAddressCache(
    DiagnosticsClientContext clientContext,
    URI serviceEndpoint,
    Protocol protocol,
    IAuthorizationTokenProvider tokenProvider,
    UserAgentContainer userAgent,
    HttpClient httpClient,
    boolean tcpConnectionEndpointRediscoveryEnabled,
    ApiType apiType,
    GlobalEndpointManager globalEndpointManager,
    ConnectionPolicy connectionPolicy,
    IOpenConnectionsHandler openConnectionsHandler) {
    this(clientContext,
        serviceEndpoint,
        protocol,
        tokenProvider,
        userAgent,
        httpClient,
        DefaultSuboptimalPartitionForceRefreshIntervalInSeconds,
        tcpConnectionEndpointRediscoveryEnabled,
        apiType,
        globalEndpointManager,
        connectionPolicy,
        openConnectionsHandler);
}
/**
 * Evicts every cached address entry associated with the given server endpoint and then
 * drops the server's bookkeeping entry from the reverse index. Invoked by TCP connection
 * endpoint rediscovery; a warning is logged if it fires while rediscovery is disabled.
 *
 * @param serverKey physical server URI whose cached entries should be invalidated.
 * @return number of cache entries that were evicted.
 */
@Override
public int updateAddresses(final URI serverKey) {
    Objects.requireNonNull(serverKey, "expected non-null serverKey");

    final AtomicInteger evictedEntries = new AtomicInteger(0);

    if (!this.tcpConnectionEndpointRediscoveryEnabled) {
        logger.warn("tcpConnectionEndpointRediscovery is not enabled, should not reach here.");
        return evictedEntries.get();
    }

    this.serverPartitionAddressToPkRangeIdMap.computeIfPresent(serverKey, (key, pkRangeIdentities) -> {
        pkRangeIdentities.forEach(pkRangeIdentity -> {
            if (pkRangeIdentity.getPartitionKeyRangeId().equals(PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
                // Master addresses live in a dedicated slot, not in the async cache.
                this.masterPartitionAddressCache = null;
            } else {
                this.serverPartitionAddressCache.remove(pkRangeIdentity);
            }
            evictedEntries.incrementAndGet();
        });
        // Returning null removes the mapping for this server key from the reverse index.
        return null;
    });

    return evictedEntries.get();
}
/**
 * Resolves the replica addresses for the given partition key range, serving from the cache
 * when possible. Master-partition requests are delegated to {@link #resolveMasterAsync}.
 * A forced refresh is triggered when the caller requests it, when the partition has stayed
 * with an incomplete replica set past the configured interval, or (after resolution) when
 * any returned address URI is in an unhealthy status.
 *
 * @param request the service request driving the lookup (auth + diagnostics context).
 * @param partitionKeyRangeIdentity identity of the range whose addresses are needed.
 * @param forceRefreshPartitionAddresses whether the cached entry must be refreshed.
 * @return the addresses wrapped in a ValueHolder; holds null when the range is gone (404/410).
 */
@Override
public Mono<Utils.ValueHolder<AddressInformation[]>> tryGetAddresses(RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity partitionKeyRangeIdentity,
    boolean forceRefreshPartitionAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", "");
    logger.debug("PartitionKeyRangeIdentity {}, forceRefreshPartitionAddresses {}",
        partitionKeyRangeIdentity,
        forceRefreshPartitionAddresses);
    // Master partition uses a dedicated resolution path and cache slot.
    if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(),
        PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
        return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties)
            .map(partitionKeyRangeIdentityPair -> new Utils.ValueHolder<>(partitionKeyRangeIdentityPair.getRight()));
    }
    // May escalate this lookup into a collection routing map refresh (sets a request flag).
    evaluateCollectionRoutingMapRefreshForServerPartition(
        request, partitionKeyRangeIdentity, forceRefreshPartitionAddresses);
    Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity);
    if (suboptimalServerPartitionTimestamp != null) {
        logger.debug("suboptimalServerPartitionTimestamp is {}", suboptimalServerPartitionTimestamp);
        // Partition has had fewer replicas than expected for longer than the refresh interval.
        boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds()
            > this.suboptimalPartitionForceRefreshIntervalInSeconds;
        if (forceRefreshDueToSuboptimalPartitionReplicaSet) {
            // CAS-like update: only the caller that swaps the timestamp to Instant.MAX
            // actually performs the forced refresh; concurrent callers see a changed value.
            Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity,
                (key, oldVal) -> {
                    logger.debug("key = {}, oldValue = {}", key, oldVal);
                    if (suboptimalServerPartitionTimestamp.equals(oldVal)) {
                        return Instant.MAX;
                    } else {
                        return oldVal;
                    }
                });
            logger.debug("newValue is {}", newValue);
            if (!suboptimalServerPartitionTimestamp.equals(newValue)) {
                logger.debug("setting forceRefreshPartitionAddresses to true");
                forceRefreshPartitionAddresses = true;
            }
        }
    }
    final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses;
    if (forceRefreshPartitionAddressesModified) {
        logger.debug("refresh serverPartitionAddressCache for {}", partitionKeyRangeIdentity);
        // Mark endpoints that already failed for this request so the merge keeps their status.
        for (Uri uri : request.requestContext.getFailedEndpoints()) {
            uri.setUnhealthy();
        }
        this.serverPartitionAddressCache.refreshWithInitFunction(
            partitionKeyRangeIdentity,
            cachedAddresses -> this.getAddressesForRangeId(
                request,
                partitionKeyRangeIdentity,
                true,
                cachedAddresses));
        this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
    }
    Mono<Utils.ValueHolder<AddressInformation[]>> addressesObs =
        this.serverPartitionAddressCache
            .getAsyncWithInitFunction(
                partitionKeyRangeIdentity,
                null,
                cachedAddresses -> this.getAddressesForRangeId(
                    request,
                    partitionKeyRangeIdentity,
                    false,
                    cachedAddresses))
            .map(Utils.ValueHolder::new);
    return addressesObs
        .map(
            addressesValueHolder -> {
                // Track partitions whose replica set came back incomplete.
                if (notAllReplicasAvailable(addressesValueHolder.v)) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("not all replicas available {}", JavaStreamUtils.info(addressesValueHolder.v));
                    }
                    this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now());
                }
                // Schedule a background refresh when any address URI reports unhealthy status;
                // the current (possibly stale) addresses are still returned to the caller.
                if (Arrays
                    .stream(addressesValueHolder.v)
                    .anyMatch(addressInformation -> addressInformation.getPhysicalUri().shouldRefreshHealthStatus())) {
                    logger.info("refresh cache due to address uri in unhealthy status");
                    this.serverPartitionAddressCache.refreshWithInitFunction(
                        partitionKeyRangeIdentity,
                        cachedAddresses -> this.getAddressesForRangeId(
                            request,
                            partitionKeyRangeIdentity,
                            true,
                            cachedAddresses));
                }
                return addressesValueHolder;
            })
        .onErrorResume(ex -> {
            Throwable unwrappedException = reactor.core.Exceptions.unwrap(ex);
            CosmosException dce = Utils.as(unwrappedException, CosmosException.class);
            if (dce == null) {
                logger.error("unexpected failure", ex);
                if (forceRefreshPartitionAddressesModified) {
                    this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
                }
                return Mono.error(unwrappedException);
            } else {
                logger.debug("tryGetAddresses dce", dce);
                // Range-gone conditions resolve to an empty holder so the caller can
                // re-resolve the partition key range rather than fail outright.
                if (Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.NOTFOUND) ||
                    Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.GONE) ||
                    Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) {
                    this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
                    logger.debug("tryGetAddresses: inner onErrorResumeNext return null", dce);
                    return Mono.just(new Utils.ValueHolder<>(null));
                }
                return Mono.error(unwrappedException);
            }
        });
}
/**
 * Replaces the handler used to proactively open connections to resolved replica addresses.
 *
 * @param openConnectionsHandler the new handler; callers may pass null to disable proactive opens.
 */
@Override
public void setOpenConnectionsHandler(IOpenConnectionsHandler openConnectionsHandler) {
    this.openConnectionsHandler = openConnectionsHandler;
}
/**
 * Calls the gateway address endpoint to fetch replica addresses for the given partition key
 * ranges of a collection. Builds the query (URL, protocol filter, pk range ids), attaches
 * auth (master-key/resource-token inline, or AAD via the async header populator), records
 * address-resolution and metadata diagnostics, and maps transport failures to
 * CosmosExceptions with gateway sub-status codes.
 *
 * @param request the service request (auth context, diagnostics, refresh flags).
 * @param collectionRid rid of the collection whose addresses are requested.
 * @param partitionKeyRangeIds pk range ids to resolve (joined into one query parameter).
 * @param forceRefresh whether the gateway should bypass its own address cache.
 * @return the deserialized list of Address entries, or an error Mono with a CosmosException.
 */
public Mono<List<Address>> getServerAddressesViaGatewayAsync(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {
    if (logger.isDebugEnabled()) {
        logger.debug("getServerAddressesViaGatewayAsync collectionRid {}, partitionKeyRangeIds {}", collectionRid,
            JavaStreamUtils.toString(partitionKeyRangeIds, ","));
    }
    request.setAddressRefresh(true, forceRefresh);
    String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true);
    HashMap<String, String> addressQuery = new HashMap<>();
    addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
    HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
    if (forceRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
    }
    if (request.forceCollectionRoutingMapRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
    }
    addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
    addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds));
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // For non-AAD auth the token is computed synchronously and attached here; AAD tokens
    // are populated asynchronously just before the HTTP call below.
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        String token = null;
        try {
            token = this.tokenProvider.getUserAuthorizationToken(
                collectionRid,
                ResourceType.Document,
                RequestVerb.GET,
                headers,
                AuthorizationTokenType.PrimaryMasterKey,
                request.properties);
        } catch (UnauthorizedException e) {
            // Best-effort: fall through and retry with the collection alt-link below.
            if (logger.isDebugEnabled()) {
                logger.debug("User doesn't have resource token for collection rid {}", collectionRid);
            }
        }
        if (token == null && request.getIsNameBased()) {
            // For name-based requests a resource token may be scoped to the alt-link
            // rather than the rid, so retry resolution with the collection path.
            String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress());
            token = this.tokenProvider.getUserAuthorizationToken(
                collectionAltLink,
                ResourceType.Document,
                RequestVerb.GET,
                headers,
                AuthorizationTokenType.PrimaryMasterKey,
                request.properties);
        }
        token = HttpUtils.urlEncode(token);
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
    }
    URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery));
    String identifier = logAddressResolutionStart(
        request, targetEndpoint, forceRefresh, request.forceCollectionRoutingMapRefresh);
    HttpHeaders httpHeaders = new HttpHeaders(headers);
    Instant addressCallStartTime = Instant.now();
    HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders);
    Mono<HttpResponse> httpResponseMono;
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        httpResponseMono = this.httpClient.send(httpRequest,
            Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
    } else {
        httpResponseMono = tokenProvider
            .populateAuthorizationHeader(httpHeaders)
            .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
    }
    Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, clientContext, httpResponseMono, httpRequest);
    return dsrObs.map(
        dsr -> {
            // Record lookup latency into the request's metadata diagnostics when available.
            MetadataDiagnosticsContext metadataDiagnosticsContext =
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
            if (metadataDiagnosticsContext != null) {
                Instant addressCallEndTime = Instant.now();
                MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                    addressCallEndTime,
                    MetadataType.SERVER_ADDRESS_LOOKUP);
                metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
            }
            if (logger.isDebugEnabled()) {
                logger.debug("getServerAddressesViaGatewayAsync deserializes result");
            }
            logAddressResolutionEnd(request, identifier, null);
            return dsr.getQueryResponse(null, Address.class);
        }).onErrorResume(throwable -> {
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        logAddressResolutionEnd(request, identifier, unwrappedException.toString());
        if (!(unwrappedException instanceof Exception)) {
            // Errors (e.g. OutOfMemoryError) are rethrown untouched.
            logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
            return Mono.error(unwrappedException);
        }
        Exception exception = (Exception) unwrappedException;
        CosmosException dce;
        if (!(exception instanceof CosmosException)) {
            // Wrap raw network failures: read timeout -> 408, other network failure -> 503.
            logger.error("Network failure", exception);
            int statusCode = 0;
            if (WebExceptionUtility.isNetworkFailure(exception)) {
                if (WebExceptionUtility.isReadTimeoutException(exception)) {
                    statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                } else {
                    statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                }
            }
            dce = BridgeInternal.createCosmosException(
                request.requestContext.resourcePhysicalAddress, statusCode, exception);
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        } else {
            dce = (CosmosException) exception;
        }
        // Tag gateway-endpoint sub-status codes so retry policies can distinguish the failure.
        if (WebExceptionUtility.isNetworkFailure(dce)) {
            if (WebExceptionUtility.isReadTimeoutException(dce)) {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
            } else {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
            }
        }
        if (request.requestContext.cosmosDiagnostics != null) {
            BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
        }
        return Mono.error(dce);
    });
}
/** Intentionally a no-op: this cache owns no resources that need explicit disposal. */
public void dispose() {
}
/**
 * Resolves the master partition's replica addresses, serving the dedicated
 * {@link #masterPartitionAddressCache} slot when it is populated and not stale.
 * A refresh is forced when requested by the caller or when the cached replica set has been
 * incomplete for longer than the configured interval.
 *
 * @param request the service request driving the lookup.
 * @param forceRefresh whether a gateway round trip must happen regardless of the cache.
 * @param properties request properties forwarded to the gateway call.
 * @return a pair of the master partition identity and its addresses.
 */
private Mono<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map<String, Object> properties) {
    logger.debug("resolveMasterAsync forceRefresh: {}", forceRefresh);
    // Snapshot the volatile field once so the staleness check and return use the same value.
    Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeInitial = this.masterPartitionAddressCache;
    forceRefresh = forceRefresh ||
        (masterAddressAndRangeInitial != null &&
            notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) &&
            Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds);
    if (forceRefresh || this.masterPartitionAddressCache == null) {
        Mono<List<Address>> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync(
            request,
            ResourceType.Database,
            null,
            databaseFeedEntryUrl,
            forceRefresh,
            false,
            properties);
        return masterReplicaAddressesObs.map(
            masterAddresses -> {
                Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeRes =
                    this.toPartitionAddressAndRange("", masterAddresses);
                this.masterPartitionAddressCache = masterAddressAndRangeRes;
                // Start (or clear) the suboptimal-replica-set timer; Instant.MAX is the
                // "not currently suboptimal" sentinel.
                if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight())
                    && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
                    this.suboptimalMasterPartitionTimestamp = Instant.now();
                } else {
                    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
                }
                return masterPartitionAddressCache;
            })
            .doOnError(
                e -> {
                    // Reset the timer on failure so a transient error doesn't keep forcing refreshes.
                    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
                });
    } else {
        // Serving from cache: still begin tracking an incomplete replica set if needed.
        if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight())
            && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
            this.suboptimalMasterPartitionTimestamp = Instant.now();
        }
        return Mono.just(masterAddressAndRangeInitial);
    }
}
/**
 * Decides whether this forced partition-address refresh should be escalated into a
 * collection routing map refresh, and updates the per-collection
 * {@link ForcedRefreshMetadata} bookkeeping accordingly. May set
 * {@code request.forceCollectionRoutingMapRefresh} as a side effect.
 *
 * @param request the service request; its forceCollectionRoutingMapRefresh flag is read and may be set.
 * @param pkRangeIdentity identity of the partition being refreshed.
 * @param forceRefreshPartitionAddresses whether the caller requested a forced address refresh.
 */
private void evaluateCollectionRoutingMapRefreshForServerPartition(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefreshPartitionAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);
    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
    if (forceRefreshPartitionAddresses) {
        ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
            collectionRid,
            (colRid) -> new ForcedRefreshMetadata());
        if (request.forceCollectionRoutingMapRefresh) {
            // Caller already asked for a routing map refresh; just record it.
            forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
                pkRangeIdentity,
                true);
        } else if (forcedRefreshMetadata.shouldIncludeCollectionRoutingMapRefresh(pkRangeIdentity)) {
            // An address-only refresh already happened recently without helping — escalate.
            request.forceCollectionRoutingMapRefresh = true;
            forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
                pkRangeIdentity,
                true);
        } else {
            forcedRefreshMetadata.signalPartitionAddressOnlyRefresh(pkRangeIdentity);
        }
    } else if (request.forceCollectionRoutingMapRefresh) {
        // Routing-map-only refresh (no address refresh): record it so later escalation
        // decisions measure from this point.
        ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
            collectionRid,
            (colRid) -> new ForcedRefreshMetadata());
        forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
            pkRangeIdentity,
            false);
    }
    logger.debug("evaluateCollectionRoutingMapRefreshForServerPartition collectionRid {}, partitionKeyRangeId {},"
        + " " +
        "forceRefreshPartitionAddresses {}, forceCollectionRoutingMapRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefreshPartitionAddresses,
        request.forceCollectionRoutingMapRefresh);
}
/**
 * Validates that the partition key range identity and both of its components
 * (collection rid and partition key range id) are non-null.
 *
 * @param pkRangeIdentity the identity to validate.
 */
private void validatePkRangeIdentity(PartitionKeyRangeIdentity pkRangeIdentity) {
    Utils.checkNotNullOrThrow(pkRangeIdentity, "pkRangeId", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getCollectionRid(), "pkRangeId.getCollectionRid()", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getPartitionKeyRangeId(), "pkRangeId.getPartitionKeyRangeId()", "");
}
/**
 * Resolves replica addresses for a single partition key range via the gateway, merges them
 * with previously cached addresses (so still-valid cached Uri instances — and their health
 * state — are reused), and optionally kicks off replica address validation.
 *
 * @param request the service request driving the lookup (auth + diagnostics context).
 * @param pkRangeIdentity identity (collection rid + pk range id) of the range to resolve.
 * @param forceRefresh whether the gateway should bypass its own address cache.
 * @param cachedAddresses previously cached addresses for this range; may be null.
 * @return the merged addresses, or an error Mono; PartitionKeyRangeGoneException when the
 *         gateway response contains no entry for the requested range.
 */
private Mono<AddressInformation[]> getAddressesForRangeId(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefresh,
    AddressInformation[] cachedAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);
    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
    logger.debug(
        "getAddressesForRangeId collectionRid {}, partitionKeyRangeId {}, forceRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefresh);
    Mono<List<Address>> addressResponse = this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh);
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> addressInfos =
        addressResponse.map(
            addresses -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("addresses from getServerAddressesViaGatewayAsync in getAddressesForRangeId {}",
                        JavaStreamUtils.info(addresses));
                }
                return addresses
                    .stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream()
                    // BUGFIX: map each GROUP to its pair. Previously the full (ungrouped)
                    // 'addresses' list was passed here, so if the response ever contained
                    // more than one pk range, every pair would have held all addresses
                    // (tagged with the first entry's range id). Matches the correct usage
                    // in openConnectionsAndInitCaches.
                    .map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, groupedAddresses))
                    .collect(Collectors.toList());
            });
    // Keep only the pair that belongs to the range we actually asked for.
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> result =
        addressInfos
            .map(addressInfo -> addressInfo.stream()
                .filter(a -> StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId))
                .collect(Collectors.toList()));
    return result
        .flatMap(
            list -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("getAddressesForRangeId flatMap got result {}", JavaStreamUtils.info(list));
                }
                if (list.isEmpty()) {
                    // Gateway no longer knows this range — signal a split/merge to the caller.
                    String errorMessage = String.format(
                        RMResources.PartitionKeyRangeNotFound,
                        partitionKeyRangeId,
                        collectionRid);
                    PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage);
                    BridgeInternal.setResourceAddress(e, collectionRid);
                    return Mono.error(e);
                } else {
                    AddressInformation[] mergedAddresses = this.mergeAddresses(list.get(0).getRight(), cachedAddresses);
                    for (AddressInformation address : mergedAddresses) {
                        address.getPhysicalUri().setRefreshed();
                    }
                    if (this.replicaAddressValidationEnabled) {
                        this.validateReplicaAddresses(mergedAddresses);
                    }
                    return Mono.just(mergedAddresses);
                }
            })
        .doOnError(e -> logger.debug("getAddressesForRangeId", e));
}
/**
 * Calls the gateway address endpoint to fetch the master partition's replica addresses.
 * Mirrors {@link #getServerAddressesViaGatewayAsync} but queries by entry URL rather than
 * partition key range ids, and supports the master collection resolver header.
 *
 * @param request the service request (auth context, diagnostics, refresh flags).
 * @param resourceType resource type used for token acquisition.
 * @param resourceAddress resource address used for token acquisition; may be null.
 * @param entryUrl feed entry URL identifying the resource whose addresses are requested.
 * @param forceRefresh whether the gateway should bypass its own address cache.
 * @param useMasterCollectionResolver whether to set the corresponding gateway header.
 * @param properties request properties forwarded to token acquisition.
 * @return the deserialized list of Address entries, or an error Mono with a CosmosException.
 */
public Mono<List<Address>> getMasterAddressesViaGatewayAsync(
    RxDocumentServiceRequest request,
    ResourceType resourceType,
    String resourceAddress,
    String entryUrl,
    boolean forceRefresh,
    boolean useMasterCollectionResolver,
    Map<String, Object> properties) {
    logger.debug("getMasterAddressesViaGatewayAsync " +
        "resourceType {}, " +
        "resourceAddress {}, " +
        "entryUrl {}, " +
        "forceRefresh {}, " +
        "useMasterCollectionResolver {}",
        resourceType,
        resourceAddress,
        entryUrl,
        forceRefresh,
        useMasterCollectionResolver
    );
    request.setAddressRefresh(true, forceRefresh);
    HashMap<String, String> queryParameters = new HashMap<>();
    queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
    HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
    if (forceRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
    }
    if (useMasterCollectionResolver) {
        headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, "true");
    }
    if(request.forceCollectionRoutingMapRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
    }
    queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Non-AAD auth: attach the token synchronously; AAD tokens are populated async below.
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        String token = this.tokenProvider.getUserAuthorizationToken(
            resourceAddress,
            resourceType,
            RequestVerb.GET,
            headers,
            AuthorizationTokenType.PrimaryMasterKey,
            properties);
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token));
    }
    URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters));
    String identifier = logAddressResolutionStart(
        request, targetEndpoint, true, true);
    HttpHeaders defaultHttpHeaders = new HttpHeaders(headers);
    HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), defaultHttpHeaders);
    Instant addressCallStartTime = Instant.now();
    Mono<HttpResponse> httpResponseMono;
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        httpResponseMono = this.httpClient.send(httpRequest,
            Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
    } else {
        httpResponseMono = tokenProvider
            .populateAuthorizationHeader(defaultHttpHeaders)
            .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
    }
    Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, this.clientContext, httpResponseMono, httpRequest);
    return dsrObs.map(
        dsr -> {
            // Record lookup latency into the request's metadata diagnostics when available.
            MetadataDiagnosticsContext metadataDiagnosticsContext =
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
            if (metadataDiagnosticsContext != null) {
                Instant addressCallEndTime = Instant.now();
                MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                    addressCallEndTime,
                    MetadataType.MASTER_ADDRESS_LOOK_UP);
                metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
            }
            logAddressResolutionEnd(request, identifier, null);
            return dsr.getQueryResponse(null, Address.class);
        }).onErrorResume(throwable -> {
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        logAddressResolutionEnd(request, identifier, unwrappedException.toString());
        if (!(unwrappedException instanceof Exception)) {
            // Errors (e.g. OutOfMemoryError) are rethrown untouched.
            logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
            return Mono.error(unwrappedException);
        }
        Exception exception = (Exception) unwrappedException;
        CosmosException dce;
        if (!(exception instanceof CosmosException)) {
            // Wrap raw network failures: read timeout -> 408, other network failure -> 503.
            logger.error("Network failure", exception);
            int statusCode = 0;
            if (WebExceptionUtility.isNetworkFailure(exception)) {
                if (WebExceptionUtility.isReadTimeoutException(exception)) {
                    statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                } else {
                    statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                }
            }
            dce = BridgeInternal.createCosmosException(
                request.requestContext.resourcePhysicalAddress, statusCode, exception);
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        } else {
            dce = (CosmosException) exception;
        }
        // Tag gateway-endpoint sub-status codes so retry policies can distinguish the failure.
        if (WebExceptionUtility.isNetworkFailure(dce)) {
            if (WebExceptionUtility.isReadTimeoutException(dce)) {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
            } else {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
            }
        }
        if (request.requestContext.cosmosDiagnostics != null) {
            BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
        }
        return Mono.error(dce);
    });
}
/**
 * Merges freshly fetched addresses with the cached ones: whenever a physical URI is already
 * present in the cache, the cached {@link AddressInformation} instance is kept (preserving
 * its accumulated health state); otherwise the new instance is used. The result preserves
 * the order of {@code newAddresses}.
 *
 * @param newAddresses latest addresses returned by the gateway; must not be null.
 * @param cachedAddresses previously cached addresses; may be null (then newAddresses is returned as-is).
 * @return the merged address array.
 */
private AddressInformation[] mergeAddresses(AddressInformation[] newAddresses, AddressInformation[] cachedAddresses) {
    checkNotNull(newAddresses, "Argument 'newAddresses' should not be null");

    if (cachedAddresses == null) {
        return newAddresses;
    }

    // Index the cached entries by their physical URI for O(1) lookup.
    Map<Uri, AddressInformation> cachedByUri =
        Arrays
            .stream(cachedAddresses)
            .collect(Collectors.toMap(cached -> cached.getPhysicalUri(), cached -> cached));

    // Prefer the cached instance when the URI survives the refresh; fall back to the new one.
    return Arrays
        .stream(newAddresses)
        .map(incoming -> cachedByUri.getOrDefault(incoming.getPhysicalUri(), incoming))
        .toArray(AddressInformation[]::new);
}
/**
 * Converts a group of gateway Address entries (all for the same partition key range) into a
 * (PartitionKeyRangeIdentity, AddressInformation[]) pair. When TCP endpoint rediscovery is
 * enabled, also records each address's server key in the reverse index so the entries can be
 * evicted later via {@link #updateAddresses}.
 *
 * @param collectionRid rid of the owning collection ("" for the master partition).
 * @param addresses non-empty list of addresses; the range id is taken from the first entry.
 * @return the identity/addresses pair.
 */
private Pair<PartitionKeyRangeIdentity, AddressInformation[]> toPartitionAddressAndRange(String collectionRid, List<Address> addresses) {
    if (logger.isDebugEnabled()) {
        logger.debug("toPartitionAddressAndRange");
    }
    // All entries in the list share the same pk range id, so the first one is representative.
    Address address = addresses.get(0);
    PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity(collectionRid, address.getParitionKeyRangeId());
    AddressInformation[] addressInfos =
        addresses
            .stream()
            .map(addr -> GatewayAddressCache.toAddressInformation(addr))
            .collect(Collectors.toList())
            .toArray(new AddressInformation[addresses.size()]);
    if (this.tcpConnectionEndpointRediscoveryEnabled) {
        for (AddressInformation addressInfo : addressInfos) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Added address to serverPartitionAddressToPkRangeIdMap: ({\"partitionKeyRangeIdentity\":{},\"address\":{}})",
                    partitionKeyRangeIdentity,
                    addressInfo);
            }
            // Maintain the server-key -> pk-range reverse index used by updateAddresses.
            this.serverPartitionAddressToPkRangeIdMap.compute(addressInfo.getServerKey(), (serverKey, partitionKeyRangeIdentitySet) -> {
                if (partitionKeyRangeIdentitySet == null) {
                    partitionKeyRangeIdentitySet = ConcurrentHashMap.newKeySet();
                }
                partitionKeyRangeIdentitySet.add(partitionKeyRangeIdentity);
                return partitionKeyRangeIdentitySet;
            });
        }
    }
    return Pair.of(partitionKeyRangeIdentity, addressInfos);
}
/**
 * Adapts a gateway Address entry into an AddressInformation
 * (marked reachable, preserving primary flag, physical URI and protocol scheme).
 */
private static AddressInformation toAddressInformation(Address address) {
    return new AddressInformation(true, address.isPrimary(), address.getPhyicalUri(), address.getProtocolScheme());
}
/**
 * Warm-up path: resolves addresses for the given partition key ranges in batches of
 * {@link #DefaultBatchSize}, seeds the server partition address cache with the results, and
 * (when a handler is set) proactively opens connections to each resolved physical URI.
 *
 * @param collection the collection whose partitions are being warmed up.
 * @param partitionKeyRangeIdentities ranges to resolve and connect to.
 * @return a Flux of per-endpoint open-connection results; empty for a batch when no
 *         openConnectionsHandler is configured.
 */
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(
    DocumentCollection collection,
    List<PartitionKeyRangeIdentity> partitionKeyRangeIdentities) {
    checkNotNull(collection, "Argument 'collection' should not be null");
    checkNotNull(partitionKeyRangeIdentities, "Argument 'partitionKeyRangeIdentities' should not be null");
    if (logger.isDebugEnabled()) {
        logger.debug(
            "openConnectionsAndInitCaches collection: {}, partitionKeyRangeIdentities: {}",
            collection.getResourceId(),
            JavaStreamUtils.toString(partitionKeyRangeIdentities, ","));
    }
    List<Flux<List<Address>>> tasks = new ArrayList<>();
    int batchSize = GatewayAddressCache.DefaultBatchSize;
    // A single synthetic read request provides the auth/diagnostics context for all batches.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this.clientContext,
        OperationType.Read,
        collection.getResourceId(),
        ResourceType.DocumentCollection,
        Collections.emptyMap());
    // Slice the range ids into gateway calls of at most batchSize ids each.
    for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) {
        int endIndex = i + batchSize;
        endIndex = Math.min(endIndex, partitionKeyRangeIdentities.size());
        tasks.add(
            this.getServerAddressesViaGatewayWithRetry(
                request,
                collection.getResourceId(),
                partitionKeyRangeIdentities
                    .subList(i, endIndex)
                    .stream()
                    .map(PartitionKeyRangeIdentity::getPartitionKeyRangeId)
                    .collect(Collectors.toList()),
                false).flux());
    }
    return Flux.concat(tasks)
        .flatMap(list -> {
            // Group the batch response by pk range and convert each group into a cacheable pair.
            List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> addressInfos =
                list.stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream().map(addresses -> toPartitionAddressAndRange(collection.getResourceId(), addresses))
                    .collect(Collectors.toList());
            return Flux.fromIterable(addressInfos)
                .flatMap(addressInfo -> {
                    // Seed the cache regardless of whether connections can be opened.
                    this.serverPartitionAddressCache.set(addressInfo.getLeft(), addressInfo.getRight());
                    if (this.openConnectionsHandler != null) {
                        return this.openConnectionsHandler.openConnections(
                            Arrays
                                .stream(addressInfo.getRight())
                                .map(addressInformation -> addressInformation.getPhysicalUri())
                                .collect(Collectors.toList()));
                    }
                    logger.info("OpenConnectionHandler is null, can not open connections");
                    return Flux.empty();
                });
        });
}
/**
 * Wraps {@link #getServerAddressesViaGatewayAsync} with backoff retries governed by the
 * client's throttling retry options; used by the cache warm-up path.
 *
 * @param request the service request providing auth/diagnostics context.
 * @param collectionRid rid of the collection whose addresses are requested.
 * @param partitionKeyRangeIds pk range ids to resolve.
 * @param forceRefresh whether the gateway should bypass its own address cache.
 * @return the address list, retried on transient/throttling failures.
 */
private Mono<List<Address>> getServerAddressesViaGatewayWithRetry(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {

    return BackoffRetryUtility.executeRetry(
        () -> this.getServerAddressesViaGatewayAsync(request, collectionRid, partitionKeyRangeIds, forceRefresh),
        new OpenConnectionAndInitCachesRetryPolicy(this.connectionPolicy.getThrottlingRetryOptions()));
}
/**
 * Reports whether the resolved replica set is smaller than the system's maximum replica
 * set size, i.e. the partition is currently "suboptimal".
 */
private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) {
    final int expectedReplicaSetSize = ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize;
    return addressInformations.length < expectedReplicaSetSize;
}
/**
 * Records the start of an address resolution in the request's diagnostics, if present.
 *
 * @return an identifier to be passed to {@link #logAddressResolutionEnd}, or null when the
 *         request carries no diagnostics.
 */
private static String logAddressResolutionStart(
    RxDocumentServiceRequest request,
    URI targetEndpointUrl,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {

    if (request.requestContext.cosmosDiagnostics == null) {
        return null;
    }

    return BridgeInternal.recordAddressResolutionStart(
        request.requestContext.cosmosDiagnostics,
        targetEndpointUrl,
        forceRefresh,
        forceCollectionRoutingMapRefresh);
}
/**
 * Records the end of an address resolution in the request's diagnostics, if present.
 *
 * @param identifier the value returned by {@link #logAddressResolutionStart}.
 * @param errorMessage failure description, or null on success.
 */
private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier, String errorMessage) {
    if (request.requestContext.cosmosDiagnostics == null) {
        return;
    }
    BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosDiagnostics, identifier, errorMessage);
}
/**
 * Per-collection bookkeeping about forced refreshes, used to decide when a forced
 * partition-address refresh should be escalated to a collection routing map refresh:
 * escalate only when an address-only refresh for the same range happened strictly after
 * the last routing map refresh, and the routing map refresh is old enough.
 */
private static class ForcedRefreshMetadata {
    // When each pk range last had an address-only forced refresh.
    private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> lastPartitionAddressOnlyRefresh;
    // When the collection routing map was last force-refreshed (any range).
    private Instant lastCollectionRoutingMapRefresh;

    public ForcedRefreshMetadata() {
        this.lastPartitionAddressOnlyRefresh = new ConcurrentHashMap<>();
        this.lastCollectionRoutingMapRefresh = Instant.now();
    }

    /** Records a routing map refresh; optionally also counts as an address refresh for pk. */
    public void signalCollectionRoutingMapRefresh(
        PartitionKeyRangeIdentity pk,
        boolean forcePartitionAddressRefresh) {

        final Instant now = Instant.now();
        if (forcePartitionAddressRefresh) {
            this.lastPartitionAddressOnlyRefresh.put(pk, now);
        }
        this.lastCollectionRoutingMapRefresh = now;
    }

    /** Records an address-only forced refresh for the given pk range. */
    public void signalPartitionAddressOnlyRefresh(PartitionKeyRangeIdentity pk) {
        this.lastPartitionAddressOnlyRefresh.put(pk, Instant.now());
    }

    /** Decides whether the next forced address refresh should also refresh the routing map. */
    public boolean shouldIncludeCollectionRoutingMapRefresh(PartitionKeyRangeIdentity pk) {
        final Instant lastAddressRefresh = this.lastPartitionAddressOnlyRefresh.get(pk);
        final Instant lastRoutingMapRefresh = this.lastCollectionRoutingMapRefresh;

        // Escalate only if an address-only refresh happened strictly AFTER the last routing
        // map refresh — i.e. the address refresh alone did not resolve the problem.
        if (lastAddressRefresh == null || !lastAddressRefresh.isAfter(lastRoutingMapRefresh)) {
            return false;
        }

        // Throttle escalations: require the minimum interval since the last routing map refresh.
        return Duration.between(lastRoutingMapRefresh, Instant.now())
            .compareTo(minDurationBeforeEnforcingCollectionRoutingMapRefresh) >= 0;
    }
}
} | class GatewayAddressCache implements IAddressCache {
// Minimum age a collection routing map refresh must have before a repeated forced
// partition-address refresh is escalated into another routing map refresh.
private final static Duration minDurationBeforeEnforcingCollectionRoutingMapRefresh = Duration.ofSeconds(30);
private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class);
// Filter template "<property> eq <value>" used to restrict returned addresses by protocol.
private final static String protocolFilterFormat = "%s eq %s";
// Number of partition key ranges resolved per gateway call in openConnectionsAndInitCaches.
private final static int DefaultBatchSize = 50;
// Default interval (seconds) after which a partition with an incomplete replica set is force-refreshed.
private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600;
private final DiagnosticsClientContext clientContext;
// Feed path of the database resource; used when resolving master partition addresses.
private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true);
// Gateway address-resolution endpoint derived from the service endpoint.
private final URI addressEndpoint;
// Non-blocking async cache of server (data) partition addresses keyed by range identity.
private final AsyncCacheNonBlocking<PartitionKeyRangeIdentity, AddressInformation[]> serverPartitionAddressCache;
// When each server partition was last observed with fewer replicas than expected
// (Instant.MAX acts as a "refresh already claimed" sentinel in tryGetAddresses).
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> suboptimalServerPartitionTimestamps;
private final long suboptimalPartitionForceRefreshIntervalInSeconds;
private final String protocolScheme;
// protocolFilterFormat instantiated for this client's protocol scheme.
private final String protocolFilter;
private final IAuthorizationTokenProvider tokenProvider;
// Headers (user agent, api type, version) attached to every address-resolution request.
private final HashMap<String, String> defaultRequestHeaders;
private final HttpClient httpClient;
// Single-entry cache for the master partition; null until first resolution.
private volatile Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterPartitionAddressCache;
private volatile Instant suboptimalMasterPartitionTimestamp;
// Reverse index: server endpoint URI -> partition key ranges whose cached addresses use it;
// only populated when TCP connection endpoint rediscovery is enabled.
private final ConcurrentHashMap<URI, Set<PartitionKeyRangeIdentity>> serverPartitionAddressToPkRangeIdMap;
private final boolean tcpConnectionEndpointRediscoveryEnabled;
// Per-collection forced-refresh bookkeeping; see ForcedRefreshMetadata.
private final ConcurrentHashMap<String, ForcedRefreshMetadata> lastForcedRefreshMap;
private final GlobalEndpointManager globalEndpointManager;
// Mutable: can be replaced later via setOpenConnectionsHandler.
private IOpenConnectionsHandler openConnectionsHandler;
private final ConnectionPolicy connectionPolicy;
private final boolean replicaAddressValidationEnabled;
/**
 * Creates a gateway-backed address cache.
 *
 * @param clientContext diagnostics client context used when parsing gateway responses.
 * @param serviceEndpoint service endpoint; the address endpoint is derived from it.
 * @param protocol transport protocol whose scheme filters returned addresses.
 * @param tokenProvider supplies authorization tokens for address-resolution calls.
 * @param userAgent user agent to attach to requests (a default is created when null).
 * @param httpClient HTTP client used for the gateway calls.
 * @param suboptimalPartitionForceRefreshIntervalInSeconds how long a partition may stay
 *        with an incomplete replica set before a refresh is forced.
 * @param tcpConnectionEndpointRediscoveryEnabled enables the server-key reverse index
 *        used by {@link #updateAddresses(URI)}.
 * @param apiType optional API type header value.
 * @param globalEndpointManager used when recording gateway failures in diagnostics.
 * @param connectionPolicy supplies throttling retry options for warm-up retries.
 * @param openConnectionsHandler handler used to proactively open connections (may be
 *        replaced later via setOpenConnectionsHandler).
 * @throws IllegalStateException when the service endpoint cannot be turned into a valid
 *         address-resolution URI.
 */
public GatewayAddressCache(
    DiagnosticsClientContext clientContext,
    URI serviceEndpoint,
    Protocol protocol,
    IAuthorizationTokenProvider tokenProvider,
    UserAgentContainer userAgent,
    HttpClient httpClient,
    long suboptimalPartitionForceRefreshIntervalInSeconds,
    boolean tcpConnectionEndpointRediscoveryEnabled,
    ApiType apiType,
    GlobalEndpointManager globalEndpointManager,
    ConnectionPolicy connectionPolicy,
    IOpenConnectionsHandler openConnectionsHandler) {
    this.clientContext = clientContext;
    try {
        // Derive the address-resolution endpoint from the service endpoint.
        this.addressEndpoint = new URL(serviceEndpoint.toURL(), Paths.ADDRESS_PATH_SEGMENT).toURI();
    } catch (MalformedURLException | URISyntaxException e) {
        logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e);
        assert false;
        throw new IllegalStateException(e);
    }
    this.tokenProvider = tokenProvider;
    this.serverPartitionAddressCache = new AsyncCacheNonBlocking<>();
    this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>();
    // Instant.MAX means "no suboptimal master replica set observed yet".
    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
    this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds;
    this.protocolScheme = protocol.scheme();
    // e.g. "<protocol property> eq <scheme>" - restricts gateway results to our transport.
    this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat,
        Constants.Properties.PROTOCOL,
        this.protocolScheme);
    this.httpClient = httpClient;
    if (userAgent == null) {
        userAgent = new UserAgentContainer();
    }
    // Headers attached to every address-resolution request.
    defaultRequestHeaders = new HashMap<>();
    defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent());
    if(apiType != null) {
        defaultRequestHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
    }
    defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
    this.serverPartitionAddressToPkRangeIdMap = new ConcurrentHashMap<>();
    this.tcpConnectionEndpointRediscoveryEnabled = tcpConnectionEndpointRediscoveryEnabled;
    this.lastForcedRefreshMap = new ConcurrentHashMap<>();
    this.globalEndpointManager = globalEndpointManager;
    this.openConnectionsHandler = openConnectionsHandler;
    this.connectionPolicy = connectionPolicy;
    this.replicaAddressValidationEnabled = Configs.isReplicaAddressValidationEnabled();
}
/**
 * Convenience constructor that uses the default suboptimal-partition force-refresh
 * interval ({@code DefaultSuboptimalPartitionForceRefreshIntervalInSeconds}); all other
 * parameters are forwarded unchanged to the primary constructor.
 */
public GatewayAddressCache(
    DiagnosticsClientContext clientContext,
    URI serviceEndpoint,
    Protocol protocol,
    IAuthorizationTokenProvider tokenProvider,
    UserAgentContainer userAgent,
    HttpClient httpClient,
    boolean tcpConnectionEndpointRediscoveryEnabled,
    ApiType apiType,
    GlobalEndpointManager globalEndpointManager,
    ConnectionPolicy connectionPolicy,
    IOpenConnectionsHandler openConnectionsHandler) {
    this(clientContext,
        serviceEndpoint,
        protocol,
        tokenProvider,
        userAgent,
        httpClient,
        DefaultSuboptimalPartitionForceRefreshIntervalInSeconds,
        tcpConnectionEndpointRediscoveryEnabled,
        apiType,
        globalEndpointManager,
        connectionPolicy,
        openConnectionsHandler);
}
/**
 * Evicts every cached address entry that maps to the given server endpoint.
 * Only effective when TCP connection endpoint rediscovery is enabled; otherwise a
 * warning is logged and nothing is evicted.
 *
 * @param serverKey the server endpoint whose cached entries should be dropped.
 * @return the number of evicted cache entries.
 */
@Override
public int updateAddresses(final URI serverKey) {
    Objects.requireNonNull(serverKey, "expected non-null serverKey");

    final AtomicInteger evictedEntryCount = new AtomicInteger(0);

    if (!this.tcpConnectionEndpointRediscoveryEnabled) {
        logger.warn("tcpConnectionEndpointRediscovery is not enabled, should not reach here.");
        return evictedEntryCount.get();
    }

    // Returning null from computeIfPresent also removes the serverKey mapping itself.
    this.serverPartitionAddressToPkRangeIdMap.computeIfPresent(serverKey, (endpoint, pkRangeIdentities) -> {
        for (PartitionKeyRangeIdentity pkRangeIdentity : pkRangeIdentities) {
            if (PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID.equals(pkRangeIdentity.getPartitionKeyRangeId())) {
                this.masterPartitionAddressCache = null;
            } else {
                this.serverPartitionAddressCache.remove(pkRangeIdentity);
            }
            evictedEntryCount.incrementAndGet();
        }
        return null;
    });

    return evictedEntryCount.get();
}
/**
 * Resolves the replica addresses for the given partition key range.
 * The master range is served by {@code resolveMasterAsync}; server ranges go through the
 * non-blocking async address cache, force-refreshing when requested, when the partition
 * has had an incomplete replica set for too long, or when a cached replica endpoint
 * reports that its health status needs re-validation.
 * NotFound/Gone/partition-key-range-gone failures resolve to a holder with a null value
 * so the caller can re-resolve; other failures propagate.
 */
@Override
public Mono<Utils.ValueHolder<AddressInformation[]>> tryGetAddresses(RxDocumentServiceRequest request,
                                                                     PartitionKeyRangeIdentity partitionKeyRangeIdentity,
                                                                     boolean forceRefreshPartitionAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", "");
    logger.debug("PartitionKeyRangeIdentity {}, forceRefreshPartitionAddresses {}",
        partitionKeyRangeIdentity,
        forceRefreshPartitionAddresses);
    // Master partition uses its own single-entry cache and resolution path.
    if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(),
        PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
        return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties)
            .map(partitionKeyRangeIdentityPair -> new Utils.ValueHolder<>(partitionKeyRangeIdentityPair.getRight()));
    }
    // Possibly escalate this refresh to also refresh the collection routing map.
    evaluateCollectionRoutingMapRefreshForServerPartition(
        request, partitionKeyRangeIdentity, forceRefreshPartitionAddresses);
    Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity);
    if (suboptimalServerPartitionTimestamp != null) {
        logger.debug("suboptimalServerPartitionTimestamp is {}", suboptimalServerPartitionTimestamp);
        // Partition has had an incomplete replica set longer than the configured interval?
        boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds()
            > this.suboptimalPartitionForceRefreshIntervalInSeconds;
        if (forceRefreshDueToSuboptimalPartitionReplicaSet) {
            // CAS-style: only the caller that still observes the stale timestamp swaps in
            // Instant.MAX and thereby wins the right to trigger the forced refresh.
            Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity,
                (key, oldVal) -> {
                    logger.debug("key = {}, oldValue = {}", key, oldVal);
                    if (suboptimalServerPartitionTimestamp.equals(oldVal)) {
                        return Instant.MAX;
                    } else {
                        return oldVal;
                    }
                });
            logger.debug("newValue is {}", newValue);
            if (!suboptimalServerPartitionTimestamp.equals(newValue)) {
                logger.debug("setting forceRefreshPartitionAddresses to true");
                forceRefreshPartitionAddresses = true;
            }
        }
    }
    // Effectively-final copy for use inside the lambdas below.
    final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses;
    if (forceRefreshPartitionAddressesModified) {
        this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
    }
    Mono<Utils.ValueHolder<AddressInformation[]>> addressesObs =
        this.serverPartitionAddressCache
            .getAsync(
                partitionKeyRangeIdentity,
                cachedAddresses -> this.getAddressesForRangeId(
                    request,
                    partitionKeyRangeIdentity,
                    forceRefreshPartitionAddressesModified,
                    cachedAddresses),
                cachedAddresses -> {
                    // Mark endpoints that failed for this request as unhealthy so the
                    // refresh predicate below can react to them.
                    for (Uri failedEndpoints : request.requestContext.getFailedEndpoints()) {
                        failedEndpoints.setUnhealthy();
                    }
                    // Refresh when explicitly forced or when any cached replica endpoint
                    // asks for its health status to be re-validated.
                    return forceRefreshPartitionAddressesModified
                        || Arrays.stream(cachedAddresses).anyMatch(addressInformation -> addressInformation.getPhysicalUri().shouldRefreshHealthStatus());
                })
            .map(Utils.ValueHolder::new);
    return addressesObs
        .map(addressesValueHolder -> {
            // Track partitions that come back with fewer replicas than expected.
            if (notAllReplicasAvailable(addressesValueHolder.v)) {
                if (logger.isDebugEnabled()) {
                    logger.debug("not all replicas available {}", JavaStreamUtils.info(addressesValueHolder.v));
                }
                this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now());
            }
            return addressesValueHolder;
        })
        .onErrorResume(ex -> {
            Throwable unwrappedException = reactor.core.Exceptions.unwrap(ex);
            CosmosException dce = Utils.as(unwrappedException, CosmosException.class);
            if (dce == null) {
                logger.error("unexpected failure", ex);
                if (forceRefreshPartitionAddressesModified) {
                    this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
                }
                return Mono.error(unwrappedException);
            } else {
                logger.debug("tryGetAddresses dce", dce);
                // NotFound / Gone / partition-key-range-gone: clear the suboptimal marker
                // and resolve to a null holder so the caller can re-resolve the range.
                if (Exceptions.isNotFound(dce) ||
                    Exceptions.isGone(dce) ||
                    Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) {
                    this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
                    logger.debug("tryGetAddresses: inner onErrorResumeNext return null", dce);
                    return Mono.just(new Utils.ValueHolder<>(null));
                }
                return Mono.error(unwrappedException);
            }
        });
}
/**
 * Replaces the handler used to proactively open connections
 * (see {@code openConnectionsAndInitCaches}).
 */
@Override
public void setOpenConnectionsHandler(IOpenConnectionsHandler openConnectionsHandler) {
    this.openConnectionsHandler = openConnectionsHandler;
}
/**
 * Performs the actual gateway HTTP call that resolves replica addresses for the given
 * partition key ranges of a collection.
 * Builds the address query (URL, protocol filter, range ids), attaches authorization
 * (master-key style inline, or AAD via populateAuthorizationHeader), records address
 * resolution and metadata diagnostics, and maps transport failures to CosmosExceptions
 * with gateway-specific sub-status codes.
 *
 * @param request the service request (address-refresh flags are set on it).
 * @param collectionRid rid of the collection whose addresses are resolved.
 * @param partitionKeyRangeIds range ids to resolve (joined into one query parameter).
 * @param forceRefresh when true the gateway is asked to bypass its own cache.
 * @return the deserialized list of gateway {@link Address} entries.
 */
public Mono<List<Address>> getServerAddressesViaGatewayAsync(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {
    if (logger.isDebugEnabled()) {
        logger.debug("getServerAddressesViaGatewayAsync collectionRid {}, partitionKeyRangeIds {}", collectionRid,
            JavaStreamUtils.toString(partitionKeyRangeIds, ","));
    }
    request.setAddressRefresh(true, forceRefresh);
    String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true);
    HashMap<String, String> addressQuery = new HashMap<>();
    addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
    HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
    if (forceRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
    }
    if (request.forceCollectionRoutingMapRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
    }
    // Restrict results to this client's protocol and the requested ranges.
    addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
    addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds));
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Non-AAD auth: compute and attach the authorization token up front.
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        String token = null;
        try {
            token = this.tokenProvider.getUserAuthorizationToken(
                collectionRid,
                ResourceType.Document,
                RequestVerb.GET,
                headers,
                AuthorizationTokenType.PrimaryMasterKey,
                request.properties);
        } catch (UnauthorizedException e) {
            // Deliberately tolerated: fall through and retry with the collection alt-link
            // below for name-based requests using a resource token.
            if (logger.isDebugEnabled()) {
                logger.debug("User doesn't have resource token for collection rid {}", collectionRid);
            }
        }
        if (token == null && request.getIsNameBased()) {
            // For name-based requests the token may only be resolvable by the
            // collection's alt-link rather than its rid.
            String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress());
            token = this.tokenProvider.getUserAuthorizationToken(
                collectionAltLink,
                ResourceType.Document,
                RequestVerb.GET,
                headers,
                AuthorizationTokenType.PrimaryMasterKey,
                request.properties);
        }
        token = HttpUtils.urlEncode(token);
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
    }
    URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery));
    String identifier = logAddressResolutionStart(
        request, targetEndpoint, forceRefresh, request.forceCollectionRoutingMapRefresh);
    HttpHeaders httpHeaders = new HttpHeaders(headers);
    Instant addressCallStartTime = Instant.now();
    HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders);
    Mono<HttpResponse> httpResponseMono;
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        httpResponseMono = this.httpClient.send(httpRequest,
            Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
    } else {
        // AAD auth: the authorization header is populated asynchronously before sending.
        httpResponseMono = tokenProvider
            .populateAuthorizationHeader(httpHeaders)
            .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
    }
    Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, clientContext, httpResponseMono, httpRequest);
    return dsrObs.map(
        dsr -> {
            // Record the lookup duration in the request's metadata diagnostics.
            MetadataDiagnosticsContext metadataDiagnosticsContext =
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
            if (metadataDiagnosticsContext != null) {
                Instant addressCallEndTime = Instant.now();
                MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                    addressCallEndTime,
                    MetadataType.SERVER_ADDRESS_LOOKUP);
                metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
            }
            if (logger.isDebugEnabled()) {
                logger.debug("getServerAddressesViaGatewayAsync deserializes result");
            }
            logAddressResolutionEnd(request, identifier, null);
            return dsr.getQueryResponse(null, Address.class);
        }).onErrorResume(throwable -> {
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        logAddressResolutionEnd(request, identifier, unwrappedException.toString());
        if (!(unwrappedException instanceof Exception)) {
            // Errors (non-Exception throwables) are fatal - propagate as-is.
            logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
            return Mono.error(unwrappedException);
        }
        Exception exception = (Exception) unwrappedException;
        CosmosException dce;
        if (!(exception instanceof CosmosException)) {
            // Wrap network failures into CosmosException: read timeout -> 408, other
            // network failure -> 503, anything else keeps status 0.
            logger.error("Network failure", exception);
            int statusCode = 0;
            if (WebExceptionUtility.isNetworkFailure(exception)) {
                if (WebExceptionUtility.isReadTimeoutException(exception)) {
                    statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                } else {
                    statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                }
            }
            dce = BridgeInternal.createCosmosException(
                request.requestContext.resourcePhysicalAddress, statusCode, exception);
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        } else {
            dce = (CosmosException) exception;
        }
        // Tag gateway-specific sub-status codes for network failures.
        if (WebExceptionUtility.isNetworkFailure(dce)) {
            if (WebExceptionUtility.isReadTimeoutException(dce)) {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
            } else {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
            }
        }
        if (request.requestContext.cosmosDiagnostics != null) {
            BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
        }
        return Mono.error(dce);
    });
}
/**
 * Intentionally a no-op: this cache currently holds nothing that requires explicit
 * cleanup here (the injected HttpClient is presumably owned by the creator - confirm).
 */
public void dispose() {
}
/**
 * Resolves addresses for the master partition using the single-entry
 * {@code masterPartitionAddressCache}, refreshing via the gateway when forced, when the
 * cache is empty, or when the cached replica set has been incomplete for longer than
 * the configured suboptimal refresh interval.
 * Maintains {@code suboptimalMasterPartitionTimestamp}: set to now when an incomplete
 * replica set is first observed, reset to Instant.MAX otherwise or on error.
 */
private Mono<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map<String, Object> properties) {
    logger.debug("resolveMasterAsync forceRefresh: {}", forceRefresh);
    // Snapshot the volatile cache once so the checks below are consistent.
    Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeInitial = this.masterPartitionAddressCache;
    forceRefresh = forceRefresh ||
        (masterAddressAndRangeInitial != null &&
            notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) &&
            Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds);
    if (forceRefresh || this.masterPartitionAddressCache == null) {
        Mono<List<Address>> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync(
            request,
            ResourceType.Database,
            null,
            databaseFeedEntryUrl,
            forceRefresh,
            false,
            properties);
        return masterReplicaAddressesObs.map(
            masterAddresses -> {
                // Master range uses an empty collection rid for its identity.
                Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeRes =
                    this.toPartitionAddressAndRange("", masterAddresses);
                this.masterPartitionAddressCache = masterAddressAndRangeRes;
                // Start the suboptimal clock if the fresh result is still incomplete,
                // otherwise clear it back to the Instant.MAX sentinel.
                if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight())
                    && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
                    this.suboptimalMasterPartitionTimestamp = Instant.now();
                } else {
                    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
                }
                return masterPartitionAddressCache;
            })
            .doOnError(
                e -> {
                    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
                });
    } else {
        // Serving from cache: still start the suboptimal clock when the cached replica
        // set is incomplete and the clock has not been started yet.
        if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight())
            && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
            this.suboptimalMasterPartitionTimestamp = Instant.now();
        }
        return Mono.just(masterAddressAndRangeInitial);
    }
}
/**
 * Decides, based on per-collection forced-refresh bookkeeping, whether a forced
 * partition address refresh should also force a collection routing map refresh
 * (setting {@code request.forceCollectionRoutingMapRefresh}), and records whichever
 * kind of refresh ends up happening.
 */
private void evaluateCollectionRoutingMapRefreshForServerPartition(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefreshPartitionAddresses) {

    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);

    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();

    if (forceRefreshPartitionAddresses) {
        ForcedRefreshMetadata bookkeeping = this.lastForcedRefreshMap.computeIfAbsent(
            collectionRid,
            (colRid) -> new ForcedRefreshMetadata());

        if (request.forceCollectionRoutingMapRefresh) {
            // Routing map refresh was already requested by the caller - just record it.
            bookkeeping.signalCollectionRoutingMapRefresh(pkRangeIdentity, true);
        } else if (bookkeeping.shouldIncludeCollectionRoutingMapRefresh(pkRangeIdentity)) {
            // Escalate the address-only refresh into a routing map refresh as well.
            request.forceCollectionRoutingMapRefresh = true;
            bookkeeping.signalCollectionRoutingMapRefresh(pkRangeIdentity, true);
        } else {
            bookkeeping.signalPartitionAddressOnlyRefresh(pkRangeIdentity);
        }
    } else if (request.forceCollectionRoutingMapRefresh) {
        // Routing map refresh without a partition address refresh.
        this.lastForcedRefreshMap
            .computeIfAbsent(collectionRid, (colRid) -> new ForcedRefreshMetadata())
            .signalCollectionRoutingMapRefresh(pkRangeIdentity, false);
    }

    logger.debug("evaluateCollectionRoutingMapRefreshForServerPartition collectionRid {}, partitionKeyRangeId {},"
            + " " +
            "forceRefreshPartitionAddresses {}, forceCollectionRoutingMapRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefreshPartitionAddresses,
        request.forceCollectionRoutingMapRefresh);
}
/** Fails fast when the identity or either of its two components is missing. */
private void validatePkRangeIdentity(PartitionKeyRangeIdentity pkRangeIdentity) {
    Utils.checkNotNullOrThrow(pkRangeIdentity, "pkRangeId", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getCollectionRid(), "pkRangeId.getCollectionRid()", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getPartitionKeyRangeId(), "pkRangeId.getPartitionKeyRangeId()", "");
}
/**
 * Resolves the replica addresses for a single partition key range by calling the
 * gateway and merging the response with previously cached addresses (so endpoint
 * health state is preserved for unchanged replicas).
 *
 * @param request the service request used for the gateway lookup.
 * @param pkRangeIdentity collection rid + partition key range id to resolve.
 * @param forceRefresh whether the gateway should bypass its own cache.
 * @param cachedAddresses previously cached addresses for this range (may be null).
 * @return the merged addresses; errors with PartitionKeyRangeGoneException when the
 *         gateway response contains no addresses for the requested range.
 */
private Mono<AddressInformation[]> getAddressesForRangeId(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefresh,
    AddressInformation[] cachedAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);

    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();

    logger.debug(
        "getAddressesForRangeId collectionRid {}, partitionKeyRangeId {}, forceRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefresh);

    Mono<List<Address>> addressResponse = this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh);

    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> addressInfos =
        addressResponse.map(
            addresses -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("addresses from getServerAddressesViaGatewayAsync in getAddressesForRangeId {}",
                        JavaStreamUtils.info(addresses));
                }
                return addresses
                    .stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream()
                    // BUGFIX: map each GROUP (the addresses of one partition key range),
                    // not the whole un-grouped response. Previously every group was paired
                    // with the full `addresses` list, which is wrong whenever the gateway
                    // response spans multiple ranges. This also matches the grouping logic
                    // in openConnectionsAndInitCaches.
                    .map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, groupedAddresses))
                    .collect(Collectors.toList());
            });

    // Keep only the pair belonging to the requested range.
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> result =
        addressInfos
            .map(addressInfo -> addressInfo.stream()
                .filter(a -> StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId))
                .collect(Collectors.toList()));

    return result
        .flatMap(
            list -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("getAddressesForRangeId flatMap got result {}", JavaStreamUtils.info(list));
                }
                if (list.isEmpty()) {
                    // The range no longer exists (split/merge) - signal Gone so the caller
                    // refreshes its routing map.
                    String errorMessage = String.format(
                        RMResources.PartitionKeyRangeNotFound,
                        partitionKeyRangeId,
                        collectionRid);
                    PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage);
                    BridgeInternal.setResourceAddress(e, collectionRid);
                    return Mono.error(e);
                } else {
                    // Reuse cached AddressInformation objects for unchanged endpoints so
                    // their health state survives the refresh.
                    AddressInformation[] mergedAddresses = this.mergeAddresses(list.get(0).getRight(), cachedAddresses);
                    for (AddressInformation address : mergedAddresses) {
                        address.getPhysicalUri().setRefreshed();
                    }
                    if (this.replicaAddressValidationEnabled) {
                        this.validateReplicaAddresses(mergedAddresses);
                    }
                    return Mono.just(mergedAddresses);
                }
            })
        .doOnError(e -> logger.debug("getAddressesForRangeId", e));
}
/**
 * Gateway HTTP call that resolves replica addresses for the MASTER partition.
 * Mirrors {@code getServerAddressesViaGatewayAsync} but targets a master resource
 * (e.g. the database feed) instead of a collection's partition key ranges, records
 * MASTER_ADDRESS_LOOK_UP metadata diagnostics, and applies the same CosmosException
 * mapping for transport failures.
 *
 * @param request the service request (address-refresh flags are set on it).
 * @param resourceType type of the master resource being resolved.
 * @param resourceAddress address used for token resolution (may be null).
 * @param entryUrl resource feed path placed in the address query.
 * @param forceRefresh when true the gateway is asked to bypass its own cache.
 * @param useMasterCollectionResolver adds the corresponding gateway header when true.
 * @param properties request properties forwarded to token resolution.
 * @return the deserialized list of gateway {@link Address} entries.
 */
public Mono<List<Address>> getMasterAddressesViaGatewayAsync(
    RxDocumentServiceRequest request,
    ResourceType resourceType,
    String resourceAddress,
    String entryUrl,
    boolean forceRefresh,
    boolean useMasterCollectionResolver,
    Map<String, Object> properties) {
    logger.debug("getMasterAddressesViaGatewayAsync " +
            "resourceType {}, " +
            "resourceAddress {}, " +
            "entryUrl {}, " +
            "forceRefresh {}, " +
            "useMasterCollectionResolver {}",
        resourceType,
        resourceAddress,
        entryUrl,
        forceRefresh,
        useMasterCollectionResolver
    );
    request.setAddressRefresh(true, forceRefresh);
    HashMap<String, String> queryParameters = new HashMap<>();
    queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
    HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
    if (forceRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
    }
    if (useMasterCollectionResolver) {
        headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, "true");
    }
    if(request.forceCollectionRoutingMapRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
    }
    // Restrict results to this client's protocol.
    queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Non-AAD auth: compute and attach the authorization token up front.
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        String token = this.tokenProvider.getUserAuthorizationToken(
            resourceAddress,
            resourceType,
            RequestVerb.GET,
            headers,
            AuthorizationTokenType.PrimaryMasterKey,
            properties);
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token));
    }
    URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters));
    String identifier = logAddressResolutionStart(
        request, targetEndpoint, true, true);
    HttpHeaders defaultHttpHeaders = new HttpHeaders(headers);
    HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), defaultHttpHeaders);
    Instant addressCallStartTime = Instant.now();
    Mono<HttpResponse> httpResponseMono;
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        httpResponseMono = this.httpClient.send(httpRequest,
            Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
    } else {
        // AAD auth: the authorization header is populated asynchronously before sending.
        httpResponseMono = tokenProvider
            .populateAuthorizationHeader(defaultHttpHeaders)
            .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
    }
    Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, this.clientContext, httpResponseMono, httpRequest);
    return dsrObs.map(
        dsr -> {
            // Record the lookup duration in the request's metadata diagnostics.
            MetadataDiagnosticsContext metadataDiagnosticsContext =
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
            if (metadataDiagnosticsContext != null) {
                Instant addressCallEndTime = Instant.now();
                MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                    addressCallEndTime,
                    MetadataType.MASTER_ADDRESS_LOOK_UP);
                metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
            }
            logAddressResolutionEnd(request, identifier, null);
            return dsr.getQueryResponse(null, Address.class);
        }).onErrorResume(throwable -> {
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        logAddressResolutionEnd(request, identifier, unwrappedException.toString());
        if (!(unwrappedException instanceof Exception)) {
            // Errors (non-Exception throwables) are fatal - propagate as-is.
            logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
            return Mono.error(unwrappedException);
        }
        Exception exception = (Exception) unwrappedException;
        CosmosException dce;
        if (!(exception instanceof CosmosException)) {
            // Wrap network failures: read timeout -> 408, other network failure -> 503.
            logger.error("Network failure", exception);
            int statusCode = 0;
            if (WebExceptionUtility.isNetworkFailure(exception)) {
                if (WebExceptionUtility.isReadTimeoutException(exception)) {
                    statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                } else {
                    statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                }
            }
            dce = BridgeInternal.createCosmosException(
                request.requestContext.resourcePhysicalAddress, statusCode, exception);
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        } else {
            dce = (CosmosException) exception;
        }
        // Tag gateway-specific sub-status codes for network failures.
        if (WebExceptionUtility.isNetworkFailure(dce)) {
            if (WebExceptionUtility.isReadTimeoutException(dce)) {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
            } else {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
            }
        }
        if (request.requestContext.cosmosDiagnostics != null) {
            BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
        }
        return Mono.error(dce);
    });
}
/**
 * Merges the addresses freshly returned by the gateway with the cached ones.
 * When a new address matches a cached one (same physical URI, protocol, public and
 * primary flags) the cached {@code AddressInformation} instance is reused so its state
 * is preserved; otherwise the new instance is taken.
 *
 * @param newAddresses the latest addresses returned by the gateway (must not be null).
 * @param cachedAddresses previously cached addresses; may be null.
 * @return the merged address array.
 */
private AddressInformation[] mergeAddresses(AddressInformation[] newAddresses, AddressInformation[] cachedAddresses) {
    checkNotNull(newAddresses, "Argument 'newAddresses' should not be null");

    if (cachedAddresses == null) {
        return newAddresses;
    }

    Map<Uri, List<AddressInformation>> cachedByPhysicalUri = Arrays
        .stream(cachedAddresses)
        .collect(Collectors.groupingBy(AddressInformation::getPhysicalUri));

    List<AddressInformation> merged = new ArrayList<>(newAddresses.length);
    for (AddressInformation candidate : newAddresses) {
        AddressInformation chosen = candidate;

        List<AddressInformation> cachedMatches = cachedByPhysicalUri.get(candidate.getPhysicalUri());
        if (cachedMatches != null) {
            for (AddressInformation cached : cachedMatches) {
                if (cached.getProtocol() == candidate.getProtocol()
                        && cached.isPublic() == candidate.isPublic()
                        && cached.isPrimary() == candidate.isPrimary()) {
                    // Same replica as before - keep the cached instance.
                    chosen = cached;
                    break;
                }
            }
        }

        merged.add(chosen);
    }

    return merged.toArray(new AddressInformation[merged.size()]);
}
/**
 * Converts a list of gateway {@code Address} entries (assumed to belong to a single
 * partition key range - the range id is taken from the first entry) into a
 * (range identity, addresses) pair, and - when TCP endpoint rediscovery is enabled -
 * records each address in the server-key reverse index.
 */
private Pair<PartitionKeyRangeIdentity, AddressInformation[]> toPartitionAddressAndRange(String collectionRid, List<Address> addresses) {
    if (logger.isDebugEnabled()) {
        logger.debug("toPartitionAddressAndRange");
    }

    // The range id comes from the first address; callers pass per-range groups.
    PartitionKeyRangeIdentity partitionKeyRangeIdentity =
        new PartitionKeyRangeIdentity(collectionRid, addresses.get(0).getParitionKeyRangeId());

    AddressInformation[] addressInfos = addresses
        .stream()
        .map(GatewayAddressCache::toAddressInformation)
        .toArray(AddressInformation[]::new);

    if (this.tcpConnectionEndpointRediscoveryEnabled) {
        // Maintain the serverKey -> range identities reverse index used by updateAddresses.
        for (AddressInformation addressInfo : addressInfos) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Added address to serverPartitionAddressToPkRangeIdMap: ({\"partitionKeyRangeIdentity\":{},\"address\":{}})",
                    partitionKeyRangeIdentity,
                    addressInfo);
            }

            this.serverPartitionAddressToPkRangeIdMap.compute(addressInfo.getServerKey(), (serverKey, partitionKeyRangeIdentitySet) -> {
                if (partitionKeyRangeIdentitySet == null) {
                    partitionKeyRangeIdentitySet = ConcurrentHashMap.newKeySet();
                }
                partitionKeyRangeIdentitySet.add(partitionKeyRangeIdentity);
                return partitionKeyRangeIdentitySet;
            });
        }
    }

    return Pair.of(partitionKeyRangeIdentity, addressInfos);
}
// Converts a gateway Address payload into an AddressInformation (always marked public).
// Note: "getPhyicalUri" matches the spelling of the getter on the Address type.
private static AddressInformation toAddressInformation(Address address) {
    return new AddressInformation(true, address.isPrimary(), address.getPhyicalUri(), address.getProtocolScheme());
}
/**
 * Warm-up path: resolves addresses for the given partition key ranges in batches of
 * {@code DefaultBatchSize}, seeds the server partition address cache, and asks the
 * openConnectionsHandler (when set) to proactively open connections to every resolved
 * replica endpoint.
 *
 * @param collection the collection being warmed up.
 * @param partitionKeyRangeIdentities ranges of that collection to resolve.
 * @return responses from the open-connection attempts; empty when no handler is set.
 */
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(
    DocumentCollection collection,
    List<PartitionKeyRangeIdentity> partitionKeyRangeIdentities) {
    checkNotNull(collection, "Argument 'collection' should not be null");
    checkNotNull(partitionKeyRangeIdentities, "Argument 'partitionKeyRangeIdentities' should not be null");
    if (logger.isDebugEnabled()) {
        logger.debug(
            "openConnectionsAndInitCaches collection: {}, partitionKeyRangeIdentities: {}",
            collection.getResourceId(),
            JavaStreamUtils.toString(partitionKeyRangeIdentities, ","));
    }
    // One gateway lookup (with retry) per batch of range ids.
    List<Flux<List<Address>>> tasks = new ArrayList<>();
    int batchSize = GatewayAddressCache.DefaultBatchSize;
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this.clientContext,
        OperationType.Read,
        collection.getResourceId(),
        ResourceType.DocumentCollection,
        Collections.emptyMap());
    for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) {
        int endIndex = i + batchSize;
        endIndex = Math.min(endIndex, partitionKeyRangeIdentities.size());
        tasks.add(
            this.getServerAddressesViaGatewayWithRetry(
                request,
                collection.getResourceId(),
                partitionKeyRangeIdentities
                    .subList(i, endIndex)
                    .stream()
                    .map(PartitionKeyRangeIdentity::getPartitionKeyRangeId)
                    .collect(Collectors.toList()),
                false).flux());
    }
    return Flux.concat(tasks)
        .flatMap(list -> {
            // Group each batch's addresses by partition key range and pair each group
            // with its range identity.
            List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> addressInfos =
                list.stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream().map(addresses -> toPartitionAddressAndRange(collection.getResourceId(), addresses))
                    .collect(Collectors.toList());
            return Flux.fromIterable(addressInfos)
                .flatMap(addressInfo -> {
                    // Seed the address cache, then open connections if a handler is set.
                    this.serverPartitionAddressCache.set(addressInfo.getLeft(), addressInfo.getRight());
                    if (this.openConnectionsHandler != null) {
                        return this.openConnectionsHandler.openConnections(
                            Arrays
                                .stream(addressInfo.getRight())
                                .map(addressInformation -> addressInformation.getPhysicalUri())
                                .collect(Collectors.toList()));
                    }
                    logger.info("OpenConnectionHandler is null, can not open connections");
                    return Flux.empty();
                });
        });
}
/**
 * Fetches replica addresses for the given partition key range ids through the gateway,
 * retrying transient failures according to the client's throttling retry options.
 */
private Mono<List<Address>> getServerAddressesViaGatewayWithRetry(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {

    // Retry policy derived from the configured throttling retry options.
    final OpenConnectionAndInitCachesRetryPolicy retryPolicy =
        new OpenConnectionAndInitCachesRetryPolicy(this.connectionPolicy.getThrottlingRetryOptions());

    return BackoffRetryUtility.executeRetry(
        () -> this.getServerAddressesViaGatewayAsync(request, collectionRid, partitionKeyRangeIds, forceRefresh),
        retryPolicy);
}
/**
 * Returns true when fewer replicas were resolved than the system replication policy's
 * maximum replica set size, i.e. the resolved replica set looks suboptimal.
 */
private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) {
    final int expectedReplicaCount = ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize;
    return addressInformations.length < expectedReplicaCount;
}
/**
 * Records the start of an address-resolution call in the request's diagnostics.
 *
 * @return an identifier to correlate with {@code logAddressResolutionEnd}, or null
 *         when the request carries no diagnostics context.
 */
private static String logAddressResolutionStart(
    RxDocumentServiceRequest request,
    URI targetEndpointUrl,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {

    // Guard clause: without a diagnostics context there is nothing to record.
    if (request.requestContext.cosmosDiagnostics == null) {
        return null;
    }

    return BridgeInternal.recordAddressResolutionStart(
        request.requestContext.cosmosDiagnostics,
        targetEndpointUrl,
        forceRefresh,
        forceCollectionRoutingMapRefresh);
}
/**
 * Records the end of an address-resolution call previously started via
 * {@code logAddressResolutionStart}; a no-op when the request has no diagnostics context.
 */
private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier, String errorMessage) {
    // Guard clause mirrors logAddressResolutionStart.
    if (request.requestContext.cosmosDiagnostics == null) {
        return;
    }
    BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosDiagnostics, identifier, errorMessage);
}
/**
 * Per-collection book-keeping of forced refreshes: when the collection routing map was last
 * force-refreshed, and per partition key range, when an address-only refresh last happened.
 * Used to decide whether a forced address refresh should also force a collection routing
 * map refresh.
 */
private static class ForcedRefreshMetadata {
    private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> lastPartitionAddressOnlyRefresh;
    private Instant lastCollectionRoutingMapRefresh;

    public ForcedRefreshMetadata() {
        this.lastPartitionAddressOnlyRefresh = new ConcurrentHashMap<>();
        this.lastCollectionRoutingMapRefresh = Instant.now();
    }

    /**
     * Records a collection routing map refresh; when {@code forcePartitionAddressRefresh}
     * is set the same timestamp also counts as an address refresh for {@code pk}.
     */
    public void signalCollectionRoutingMapRefresh(
        PartitionKeyRangeIdentity pk,
        boolean forcePartitionAddressRefresh) {

        final Instant now = Instant.now();
        if (forcePartitionAddressRefresh) {
            this.lastPartitionAddressOnlyRefresh.put(pk, now);
        }
        this.lastCollectionRoutingMapRefresh = now;
    }

    /** Records an address-only refresh for {@code pk}. */
    public void signalPartitionAddressOnlyRefresh(PartitionKeyRangeIdentity pk) {
        this.lastPartitionAddressOnlyRefresh.put(pk, Instant.now());
    }

    /**
     * True when {@code pk} already had an address-only refresh after the last routing map
     * refresh AND the minimum back-off since that routing map refresh has elapsed.
     */
    public boolean shouldIncludeCollectionRoutingMapRefresh(PartitionKeyRangeIdentity pk) {
        final Instant addressRefreshSnapshot = this.lastPartitionAddressOnlyRefresh.get(pk);
        final Instant routingMapRefreshSnapshot = this.lastCollectionRoutingMapRefresh;

        if (addressRefreshSnapshot == null
            || !addressRefreshSnapshot.isAfter(routingMapRefreshSnapshot)) {
            return false;
        }

        final Duration sinceRoutingMapRefresh =
            Duration.between(routingMapRefreshSnapshot, Instant.now());
        return sinceRoutingMapRefresh.compareTo(minDurationBeforeEnforcingCollectionRoutingMapRefresh) >= 0;
    }
}
} |
it could be null for the first time when we initialize the cache | private AddressInformation[] mergeAddresses(AddressInformation[] newAddresses, AddressInformation[] cachedAddresses) {
checkNotNull(newAddresses, "Argument 'newAddresses' should not be null");
if (cachedAddresses == null) {
return newAddresses;
}
List<AddressInformation> mergedAddresses = new ArrayList<>();
Map<Uri, AddressInformation> cachedAddressMap =
Arrays
.stream(cachedAddresses)
.collect(Collectors.toMap(address -> address.getPhysicalUri(), address -> address));
for (AddressInformation addressInformation : newAddresses) {
if (cachedAddressMap.containsKey(addressInformation.getPhysicalUri())) {
mergedAddresses.add(cachedAddressMap.get(addressInformation.getPhysicalUri()));
} else {
mergedAddresses.add(addressInformation);
}
}
return mergedAddresses.toArray(new AddressInformation[mergedAddresses.size()]);
} | if (cachedAddresses == null) { | private AddressInformation[] mergeAddresses(AddressInformation[] newAddresses, AddressInformation[] cachedAddresses) {
checkNotNull(newAddresses, "Argument 'newAddresses' should not be null");
if (cachedAddresses == null) {
return newAddresses;
}
List<AddressInformation> mergedAddresses = new ArrayList<>();
Map<Uri, List<AddressInformation>> cachedAddressMap =
Arrays
.stream(cachedAddresses)
.collect(Collectors.groupingBy(AddressInformation::getPhysicalUri));
for (AddressInformation newAddress : newAddresses) {
boolean useCachedAddress = false;
if (cachedAddressMap.containsKey(newAddress.getPhysicalUri())) {
for (AddressInformation cachedAddress : cachedAddressMap.get(newAddress.getPhysicalUri())) {
if (newAddress.getProtocol() == cachedAddress.getProtocol()
&& newAddress.isPublic() == cachedAddress.isPublic()
&& newAddress.isPrimary() == cachedAddress.isPrimary()) {
useCachedAddress = true;
mergedAddresses.add(cachedAddress);
break;
}
}
}
if (!useCachedAddress) {
mergedAddresses.add(newAddress);
}
}
return mergedAddresses.toArray(new AddressInformation[mergedAddresses.size()]);
} | class GatewayAddressCache implements IAddressCache {
// ---- Static configuration / logging ----
// Minimum back-off before a forced address refresh may also force a collection routing map refresh.
private final static Duration minDurationBeforeEnforcingCollectionRoutingMapRefresh = Duration.ofSeconds(30);
private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class);
// Filter template used to restrict returned addresses to a single protocol ("%s eq %s").
private final static String protocolFilterFormat = "%s eq %s";
// Number of partition key ranges resolved per gateway call when warming caches.
private final static int DefaultBatchSize = 50;
private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600;
// ---- Per-instance state ----
private final DiagnosticsClientContext clientContext;
private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true);
// Gateway endpoint used for all address-resolution calls.
private final URI addressEndpoint;
// Async cache of resolved replica addresses per server partition key range.
private final AsyncCache<PartitionKeyRangeIdentity, AddressInformation[]> serverPartitionAddressCache;
// Records when a range was first observed with fewer replicas than expected.
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> suboptimalServerPartitionTimestamps;
private final long suboptimalPartitionForceRefreshIntervalInSeconds;
private final String protocolScheme;
private final String protocolFilter;
private final IAuthorizationTokenProvider tokenProvider;
// Headers attached to every address-resolution request (user agent, API type, version).
private final HashMap<String, String> defaultRequestHeaders;
private final HttpClient httpClient;
// Cached master-partition addresses; volatile because it is replaced/cleared across threads.
private volatile Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterPartitionAddressCache;
private volatile Instant suboptimalMasterPartitionTimestamp;
// Reverse index: server URI -> partition key ranges, used for TCP endpoint rediscovery.
private final ConcurrentHashMap<URI, Set<PartitionKeyRangeIdentity>> serverPartitionAddressToPkRangeIdMap;
private final boolean tcpConnectionEndpointRediscoveryEnabled;
// Per-collection book-keeping of the last forced refreshes (keyed by collection rid).
private final ConcurrentHashMap<String, ForcedRefreshMetadata> lastForcedRefreshMap;
private final GlobalEndpointManager globalEndpointManager;
private IOpenConnectionsHandler openConnectionsHandler;
private final ConnectionPolicy connectionPolicy;
private final boolean replicaAddressValidationEnabled;
/**
 * Creates an address cache bound to the given service endpoint.
 *
 * @param suboptimalPartitionForceRefreshIntervalInSeconds how long a suboptimal replica set
 *        is tolerated before a forced refresh is triggered.
 * @throws IllegalStateException when the service endpoint cannot be turned into an
 *         address-resolution URI (should not happen for valid endpoints).
 */
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
long suboptimalPartitionForceRefreshIntervalInSeconds,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
this.clientContext = clientContext;
// Derive the address-resolution endpoint ("//addresses/") from the service endpoint.
try {
this.addressEndpoint = new URL(serviceEndpoint.toURL(), Paths.ADDRESS_PATH_SEGMENT).toURI();
} catch (MalformedURLException | URISyntaxException e) {
logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e);
assert false;
throw new IllegalStateException(e);
}
this.tokenProvider = tokenProvider;
this.serverPartitionAddressCache = new AsyncCache<>();
this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>();
// Instant.MAX means "no suboptimal master partition observed yet".
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds;
this.protocolScheme = protocol.scheme();
// Filter restricting gateway responses to addresses of the selected protocol.
this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat,
Constants.Properties.PROTOCOL,
this.protocolScheme);
this.httpClient = httpClient;
if (userAgent == null) {
userAgent = new UserAgentContainer();
}
// Headers sent on every address-resolution request.
defaultRequestHeaders = new HashMap<>();
defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent());
if(apiType != null) {
defaultRequestHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
this.serverPartitionAddressToPkRangeIdMap = new ConcurrentHashMap<>();
this.tcpConnectionEndpointRediscoveryEnabled = tcpConnectionEndpointRediscoveryEnabled;
this.lastForcedRefreshMap = new ConcurrentHashMap<>();
this.globalEndpointManager = globalEndpointManager;
this.openConnectionsHandler = openConnectionsHandler;
this.connectionPolicy = connectionPolicy;
this.replicaAddressValidationEnabled = Configs.isReplicaAddressValidationEnabled();
}
/**
 * Convenience constructor using the default suboptimal-partition force-refresh interval
 * ({@code DefaultSuboptimalPartitionForceRefreshIntervalInSeconds}).
 */
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
// Delegates to the full constructor with the default refresh interval.
this(clientContext,
serviceEndpoint,
protocol,
tokenProvider,
userAgent,
httpClient,
DefaultSuboptimalPartitionForceRefreshIntervalInSeconds,
tcpConnectionEndpointRediscoveryEnabled,
apiType,
globalEndpointManager,
connectionPolicy,
openConnectionsHandler);
}
/**
 * Invalidates every cached address entry served by the given server endpoint; called when
 * TCP connection endpoint rediscovery detects that the endpoint changed.
 *
 * @param serverKey the server URI whose cache entries should be dropped; must not be null.
 * @return the number of cache entries invalidated (0 when rediscovery is disabled).
 */
@Override
public int updateAddresses(final URI serverKey) {
Objects.requireNonNull(serverKey, "expected non-null serverKey");
AtomicInteger updatedCacheEntryCount = new AtomicInteger(0);
if (this.tcpConnectionEndpointRediscoveryEnabled) {
// NOTE: returning null from the remapping function removes the serverKey entry
// from serverPartitionAddressToPkRangeIdMap after the invalidation loop.
this.serverPartitionAddressToPkRangeIdMap.computeIfPresent(serverKey, (uri, partitionKeyRangeIdentitySet) -> {
for (PartitionKeyRangeIdentity partitionKeyRangeIdentity : partitionKeyRangeIdentitySet) {
// The master partition is cached in a dedicated field, not in the async cache.
if (partitionKeyRangeIdentity.getPartitionKeyRangeId().equals(PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
this.masterPartitionAddressCache = null;
} else {
this.serverPartitionAddressCache.remove(partitionKeyRangeIdentity);
}
updatedCacheEntryCount.incrementAndGet();
}
return null;
});
} else {
logger.warn("tcpConnectionEndpointRediscovery is not enabled, should not reach here.");
}
return updatedCacheEntryCount.get();
}
/**
 * Resolves replica addresses for the given partition key range, using the async cache and
 * refreshing it when forced or when the cached replica set looks suboptimal/unhealthy.
 *
 * @param request the service request driving the lookup.
 * @param partitionKeyRangeIdentity the collection rid + partition key range id to resolve.
 * @param forceRefreshPartitionAddresses when true, bypass the cached entry and re-resolve.
 * @return the resolved addresses wrapped in a ValueHolder; a holder containing null when the
 *         range is gone (404/410/partition-key-range-gone), otherwise an error Mono.
 */
@Override
public Mono<Utils.ValueHolder<AddressInformation[]>> tryGetAddresses(RxDocumentServiceRequest request,
PartitionKeyRangeIdentity partitionKeyRangeIdentity,
boolean forceRefreshPartitionAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", "");
logger.debug("PartitionKeyRangeIdentity {}, forceRefreshPartitionAddresses {}",
partitionKeyRangeIdentity,
forceRefreshPartitionAddresses);
// The master partition has its own resolution path and cache field.
if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(),
PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties)
.map(partitionKeyRangeIdentityPair -> new Utils.ValueHolder<>(partitionKeyRangeIdentityPair.getRight()));
}
// May flip request.forceCollectionRoutingMapRefresh based on forced-refresh history.
evaluateCollectionRoutingMapRefreshForServerPartition(
request, partitionKeyRangeIdentity, forceRefreshPartitionAddresses);
Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity);
if (suboptimalServerPartitionTimestamp != null) {
logger.debug("suboptimalServerPartitionTimestamp is {}", suboptimalServerPartitionTimestamp);
// A replica set that has stayed suboptimal past the interval triggers a forced refresh.
boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds()
> this.suboptimalPartitionForceRefreshIntervalInSeconds;
if (forceRefreshDueToSuboptimalPartitionReplicaSet) {
// CAS-like update: only the thread that observes the original timestamp wins the refresh.
Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity,
(key, oldVal) -> {
logger.debug("key = {}, oldValue = {}", key, oldVal);
if (suboptimalServerPartitionTimestamp.equals(oldVal)) {
return Instant.MAX;
} else {
return oldVal;
}
});
logger.debug("newValue is {}", newValue);
if (!suboptimalServerPartitionTimestamp.equals(newValue)) {
logger.debug("setting forceRefreshPartitionAddresses to true");
forceRefreshPartitionAddresses = true;
}
}
}
final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses;
if (forceRefreshPartitionAddressesModified) {
logger.debug("refresh serverPartitionAddressCache for {}", partitionKeyRangeIdentity);
// Mark the endpoints that failed for this request so the merge keeps their health state.
for (Uri uri : request.requestContext.getFailedEndpoints()) {
uri.setUnhealthy();
}
this.serverPartitionAddressCache.refreshWithInitFunction(
partitionKeyRangeIdentity,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
true,
cachedAddresses));
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
Mono<Utils.ValueHolder<AddressInformation[]>> addressesObs =
this.serverPartitionAddressCache
.getAsyncWithInitFunction(
partitionKeyRangeIdentity,
null,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
false,
cachedAddresses))
.map(Utils.ValueHolder::new);
return addressesObs
.map(
addressesValueHolder -> {
// Track partitions that resolved fewer replicas than expected.
if (notAllReplicasAvailable(addressesValueHolder.v)) {
if (logger.isDebugEnabled()) {
logger.debug("not all replicas available {}", JavaStreamUtils.info(addressesValueHolder.v));
}
this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now());
}
// Kick off a background refresh when any replica URI reports stale health status.
if (Arrays
.stream(addressesValueHolder.v)
.anyMatch(addressInformation -> addressInformation.getPhysicalUri().shouldRefreshHealthStatus())) {
logger.info("refresh cache due to address uri in unhealthy status");
this.serverPartitionAddressCache.refreshWithInitFunction(
partitionKeyRangeIdentity,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
true,
cachedAddresses));
}
return addressesValueHolder;
})
.onErrorResume(ex -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(ex);
CosmosException dce = Utils.as(unwrappedException, CosmosException.class);
if (dce == null) {
logger.error("unexpected failure", ex);
if (forceRefreshPartitionAddressesModified) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
return Mono.error(unwrappedException);
} else {
logger.debug("tryGetAddresses dce", dce);
// A gone/not-found range is surfaced as a null holder so the caller can re-resolve routing.
if (Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.NOTFOUND) ||
Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.GONE) ||
Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
logger.debug("tryGetAddresses: inner onErrorResumeNext return null", dce);
return Mono.just(new Utils.ValueHolder<>(null));
}
return Mono.error(unwrappedException);
}
});
}
/**
 * Replaces the handler used to proactively open/validate connections to replica endpoints.
 */
@Override
public void setOpenConnectionsHandler(IOpenConnectionsHandler openConnectionsHandler) {
this.openConnectionsHandler = openConnectionsHandler;
}
/**
 * Calls the gateway's address-resolution endpoint for the given partition key ranges of a
 * collection and returns the raw Address records.
 *
 * @param request the service request (auth context, diagnostics, refresh flags).
 * @param collectionRid the collection resource id whose ranges are resolved.
 * @param partitionKeyRangeIds the range ids to resolve (joined into one query parameter).
 * @param forceRefresh when true, asks the gateway to bypass its own address cache.
 * @return the addresses, or an error Mono carrying a CosmosException with gateway sub-status
 *         codes set for network/timeout failures.
 */
public Mono<List<Address>> getServerAddressesViaGatewayAsync(
RxDocumentServiceRequest request,
String collectionRid,
List<String> partitionKeyRangeIds,
boolean forceRefresh) {
if (logger.isDebugEnabled()) {
logger.debug("getServerAddressesViaGatewayAsync collectionRid {}, partitionKeyRangeIds {}", collectionRid,
JavaStreamUtils.toString(partitionKeyRangeIds, ","));
}
request.setAddressRefresh(true, forceRefresh);
String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true);
// Build the query string: target url, protocol filter, and the requested range ids.
HashMap<String, String> addressQuery = new HashMap<>();
addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
if (forceRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
}
if (request.forceCollectionRoutingMapRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
}
addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds));
headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Non-AAD auth: compute the authorization token up front (AAD is populated later, async).
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
String token = null;
try {
token = this.tokenProvider.getUserAuthorizationToken(
collectionRid,
ResourceType.Document,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
request.properties);
} catch (UnauthorizedException e) {
// Swallowed on purpose: fall through to the name-based token lookup below.
if (logger.isDebugEnabled()) {
logger.debug("User doesn't have resource token for collection rid {}", collectionRid);
}
}
if (token == null && request.getIsNameBased()) {
String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress());
token = this.tokenProvider.getUserAuthorizationToken(
collectionAltLink,
ResourceType.Document,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
request.properties);
}
token = HttpUtils.urlEncode(token);
headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
}
URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery));
String identifier = logAddressResolutionStart(
request, targetEndpoint, forceRefresh, request.forceCollectionRoutingMapRefresh);
HttpHeaders httpHeaders = new HttpHeaders(headers);
Instant addressCallStartTime = Instant.now();
HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders);
Mono<HttpResponse> httpResponseMono;
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
httpResponseMono = this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
} else {
// AAD auth: the authorization header is resolved asynchronously before sending.
httpResponseMono = tokenProvider
.populateAuthorizationHeader(httpHeaders)
.flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
}
Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, clientContext, httpResponseMono, httpRequest);
return dsrObs.map(
dsr -> {
// Record the lookup duration in the request diagnostics, when available.
MetadataDiagnosticsContext metadataDiagnosticsContext =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (metadataDiagnosticsContext != null) {
Instant addressCallEndTime = Instant.now();
MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
addressCallEndTime,
MetadataType.SERVER_ADDRESS_LOOKUP);
metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
}
if (logger.isDebugEnabled()) {
logger.debug("getServerAddressesViaGatewayAsync deserializes result");
}
logAddressResolutionEnd(request, identifier, null);
return dsr.getQueryResponse(null, Address.class);
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
logAddressResolutionEnd(request, identifier, unwrappedException.toString());
if (!(unwrappedException instanceof Exception)) {
// Errors (OOM, etc.) are rethrown untouched.
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
// Wrap raw network failures into a CosmosException with a matching status code.
logger.error("Network failure", exception);
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(
request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
}
return Mono.error(dce);
});
}
// Intentionally a no-op: this cache holds no resources that need explicit release here
// (the HttpClient is owned by the caller) — NOTE(review): confirm nothing else should be
// cleaned up when the owning client is closed.
public void dispose() {
}
/**
 * Resolves replica addresses for the master partition, serving them from the dedicated
 * masterPartitionAddressCache field unless a refresh is forced or the cached replica set has
 * been suboptimal longer than the configured interval.
 *
 * @return a pair of the master partition key range identity and its addresses.
 */
private Mono<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map<String, Object> properties) {
logger.debug("resolveMasterAsync forceRefresh: {}", forceRefresh);
Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeInitial = this.masterPartitionAddressCache;
// Upgrade to a forced refresh when the cached entry has had an incomplete replica set
// for longer than the suboptimal-partition refresh interval.
forceRefresh = forceRefresh ||
(masterAddressAndRangeInitial != null &&
notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) &&
Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds);
if (forceRefresh || this.masterPartitionAddressCache == null) {
Mono<List<Address>> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync(
request,
ResourceType.Database,
null,
databaseFeedEntryUrl,
forceRefresh,
false,
properties);
return masterReplicaAddressesObs.map(
masterAddresses -> {
Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeRes =
this.toPartitionAddressAndRange("", masterAddresses);
this.masterPartitionAddressCache = masterAddressAndRangeRes;
// Start (or clear) the suboptimal clock depending on the freshly resolved replica count.
if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight())
&& this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
this.suboptimalMasterPartitionTimestamp = Instant.now();
} else {
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
}
return masterPartitionAddressCache;
})
.doOnError(
e -> {
// Reset the suboptimal clock so a failed refresh does not keep forcing refreshes.
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
});
} else {
// Serving from cache: still start the suboptimal clock if the cached set is incomplete.
if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight())
&& this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
this.suboptimalMasterPartitionTimestamp = Instant.now();
}
return Mono.just(masterAddressAndRangeInitial);
}
}
/**
 * Decides whether a forced partition-address refresh should additionally force a collection
 * routing map refresh (it may set request.forceCollectionRoutingMapRefresh to true), and
 * records the refresh signals in the per-collection ForcedRefreshMetadata.
 */
private void evaluateCollectionRoutingMapRefreshForServerPartition(
RxDocumentServiceRequest request,
PartitionKeyRangeIdentity pkRangeIdentity,
boolean forceRefreshPartitionAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
validatePkRangeIdentity(pkRangeIdentity);
String collectionRid = pkRangeIdentity.getCollectionRid();
String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
if (forceRefreshPartitionAddresses) {
ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
collectionRid,
(colRid) -> new ForcedRefreshMetadata());
if (request.forceCollectionRoutingMapRefresh) {
// The caller already asked for a routing map refresh — just record it.
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
true);
} else if (forcedRefreshMetadata.shouldIncludeCollectionRoutingMapRefresh(pkRangeIdentity)) {
// Repeated address-only refreshes for this range: escalate to a routing map refresh.
request.forceCollectionRoutingMapRefresh = true;
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
true);
} else {
forcedRefreshMetadata.signalPartitionAddressOnlyRefresh(pkRangeIdentity);
}
} else if (request.forceCollectionRoutingMapRefresh) {
ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
collectionRid,
(colRid) -> new ForcedRefreshMetadata());
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
false);
}
logger.debug("evaluateCollectionRoutingMapRefreshForServerPartition collectionRid {}, partitionKeyRangeId {},"
+ " " +
"forceRefreshPartitionAddresses {}, forceCollectionRoutingMapRefresh {}",
collectionRid,
partitionKeyRangeId,
forceRefreshPartitionAddresses,
request.forceCollectionRoutingMapRefresh);
}
/**
 * Asserts that a partition key range identity and both of its components
 * (collection rid and partition key range id) are non-null.
 */
private void validatePkRangeIdentity(PartitionKeyRangeIdentity pkRangeIdentity) {
    // The identity itself must exist before its components can be inspected.
    Utils.checkNotNullOrThrow(pkRangeIdentity, "pkRangeId", "");
    // Both components are required to address a server partition.
    Utils.checkNotNullOrThrow(pkRangeIdentity.getCollectionRid(), "pkRangeId.getCollectionRid()", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getPartitionKeyRangeId(), "pkRangeId.getPartitionKeyRangeId()", "");
}
/**
 * Resolves replica addresses for a single server partition key range via the gateway, merges
 * them with the previously cached addresses (so accumulated Uri health state is preserved),
 * and optionally triggers replica-address validation.
 *
 * @param request the service request driving the lookup (auth, diagnostics).
 * @param pkRangeIdentity identifies the collection and partition key range to resolve.
 * @param forceRefresh when true, asks the gateway to bypass its own cache.
 * @param cachedAddresses previously cached addresses for this range; may be null on first load.
 * @return the merged addresses, or an error Mono (PartitionKeyRangeGoneException when the
 *         requested range is no longer returned by the gateway).
 */
private Mono<AddressInformation[]> getAddressesForRangeId(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefresh,
    AddressInformation[] cachedAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);

    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();

    logger.debug(
        "getAddressesForRangeId collectionRid {}, partitionKeyRangeId {}, forceRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefresh);

    Mono<List<Address>> addressResponse = this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh);

    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> addressInfos =
        addressResponse.map(
            addresses -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("addresses from getServerAddressesViaGatewayAsync in getAddressesForRangeId {}",
                        JavaStreamUtils.info(addresses));
                }
                return addresses
                    .stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream()
                    // FIX: build each pair from its own group; previously the lambda parameter
                    // was unused and the full (ungrouped) address list was passed, producing
                    // wrong pairs whenever the gateway returned more than one range.
                    .map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, groupedAddresses))
                    .collect(Collectors.toList());
            });

    // Keep only the pair matching the requested partition key range id.
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> result =
        addressInfos
            .map(addressInfo -> addressInfo.stream()
                .filter(a -> StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId))
                .collect(Collectors.toList()));

    return result
        .flatMap(
            list -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("getAddressesForRangeId flatMap got result {}", JavaStreamUtils.info(list));
                }
                if (list.isEmpty()) {
                    // The gateway no longer returns this range -> it is gone (split/merge).
                    String errorMessage = String.format(
                        RMResources.PartitionKeyRangeNotFound,
                        partitionKeyRangeId,
                        collectionRid);
                    PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage);
                    BridgeInternal.setResourceAddress(e, collectionRid);
                    return Mono.error(e);
                } else {
                    // Reuse cached AddressInformation objects for unchanged addresses so their
                    // Uri health state survives the refresh.
                    AddressInformation[] mergedAddresses = this.mergeAddresses(list.get(0).getRight(), cachedAddresses);
                    for (AddressInformation address : mergedAddresses) {
                        address.getPhysicalUri().setRefreshed();
                    }
                    if (this.replicaAddressValidationEnabled) {
                        this.validateReplicaAddresses(mergedAddresses);
                    }
                    return Mono.just(mergedAddresses);
                }
            })
        .doOnError(e -> logger.debug("getAddressesForRangeId", e));
}
/**
 * Calls the gateway's address-resolution endpoint for the master partition.
 *
 * @param request the service request (auth context, diagnostics, refresh flags).
 * @param resourceType the resource type used for token acquisition.
 * @param resourceAddress the resource address used for token acquisition; may be null.
 * @param entryUrl the target url placed in the query string.
 * @param forceRefresh when true, asks the gateway to bypass its own address cache.
 * @param useMasterCollectionResolver when true, adds the corresponding request header.
 * @param properties passed through to the token provider.
 * @return the master addresses, or an error Mono carrying a CosmosException with gateway
 *         sub-status codes set for network/timeout failures.
 */
public Mono<List<Address>> getMasterAddressesViaGatewayAsync(
RxDocumentServiceRequest request,
ResourceType resourceType,
String resourceAddress,
String entryUrl,
boolean forceRefresh,
boolean useMasterCollectionResolver,
Map<String, Object> properties) {
logger.debug("getMasterAddressesViaGatewayAsync " +
"resourceType {}, " +
"resourceAddress {}, " +
"entryUrl {}, " +
"forceRefresh {}, " +
"useMasterCollectionResolver {}",
resourceType,
resourceAddress,
entryUrl,
forceRefresh,
useMasterCollectionResolver
);
request.setAddressRefresh(true, forceRefresh);
// Build the query string: target url and protocol filter.
HashMap<String, String> queryParameters = new HashMap<>();
queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
if (forceRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
}
if (useMasterCollectionResolver) {
headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, "true");
}
if(request.forceCollectionRoutingMapRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
}
queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Non-AAD auth: compute the authorization token up front (AAD is populated later, async).
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
String token = this.tokenProvider.getUserAuthorizationToken(
resourceAddress,
resourceType,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
properties);
headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token));
}
URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters));
String identifier = logAddressResolutionStart(
request, targetEndpoint, true, true);
HttpHeaders defaultHttpHeaders = new HttpHeaders(headers);
HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), defaultHttpHeaders);
Instant addressCallStartTime = Instant.now();
Mono<HttpResponse> httpResponseMono;
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
httpResponseMono = this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
} else {
// AAD auth: the authorization header is resolved asynchronously before sending.
httpResponseMono = tokenProvider
.populateAuthorizationHeader(defaultHttpHeaders)
.flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
}
Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, this.clientContext, httpResponseMono, httpRequest);
return dsrObs.map(
dsr -> {
// Record the lookup duration in the request diagnostics, when available.
MetadataDiagnosticsContext metadataDiagnosticsContext =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (metadataDiagnosticsContext != null) {
Instant addressCallEndTime = Instant.now();
MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
addressCallEndTime,
MetadataType.MASTER_ADDRESS_LOOK_UP);
metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
}
logAddressResolutionEnd(request, identifier, null);
return dsr.getQueryResponse(null, Address.class);
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
logAddressResolutionEnd(request, identifier, unwrappedException.toString());
if (!(unwrappedException instanceof Exception)) {
// Errors (OOM, etc.) are rethrown untouched.
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
// Wrap raw network failures into a CosmosException with a matching status code.
logger.error("Network failure", exception);
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(
request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
}
return Mono.error(dce);
});
}
/***
 * Merges the new addresses returned from the gateway with the cached addresses.
 * If an address is returned from the gateway again, the cached addressInformation object keeps being used.
 * If a new address is returned, the new addressInformation object is used.
 *
 * @param newAddresses the latest addresses returned from the gateway.
 * @param cachedAddresses the cached addresses.
 *
 * @return the merged addresses.
 */
/**
 * Submits every address currently marked {@code UnhealthyPending} to the open-connections
 * handler so a real connection attempt can confirm whether the replica recovered.
 * Runs fire-and-forget on the bounded-elastic scheduler; never blocks the resolution path.
 *
 * @param addresses the replica addresses to validate; must not be null.
 */
private void validateReplicaAddresses(AddressInformation[] addresses) {
    checkNotNull(addresses, "Argument 'addresses' can not be null");

    // Only UnhealthyPending addresses need an active probe; others are left untouched.
    List<Uri> addressesNeedToValidation =
        Arrays
            .stream(addresses)
            .map(address -> address.getPhysicalUri())
            .filter(addressUri -> addressUri.getHealthStatus() == Uri.HealthStatus.UnhealthyPending)
            .collect(Collectors.toList());

    if (addressesNeedToValidation.size() > 0) {
        // Guard against a missing handler. This mirrors openConnectionsAndInitCaches,
        // which logs instead of throwing when no handler has been configured; the
        // original code would have thrown NullPointerException here.
        if (this.openConnectionsHandler == null) {
            logger.info("OpenConnectionHandler is null, can not validate replica addresses");
            return;
        }

        // Fire-and-forget: validation must not block address resolution.
        this.openConnectionsHandler
            .openConnections(addressesNeedToValidation)
            .subscribeOn(CosmosSchedulers.OPEN_CONNECTIONS_BOUNDED_ELASTIC)
            .subscribe();
    }
}
/**
 * Converts a list of gateway {@code Address} entries (all belonging to one partition key
 * range) into a {@code PartitionKeyRangeIdentity} plus its {@code AddressInformation[]},
 * and — when TCP endpoint rediscovery is enabled — records which partition key ranges each
 * physical server serves so that a server-level reset can invalidate the right cache entries.
 */
private Pair<PartitionKeyRangeIdentity, AddressInformation[]> toPartitionAddressAndRange(String collectionRid, List<Address> addresses) {
    if (logger.isDebugEnabled()) {
        logger.debug("toPartitionAddressAndRange");
    }

    // Every entry in 'addresses' shares the same partition key range id, so the
    // first element is sufficient to derive the range identity.
    PartitionKeyRangeIdentity rangeIdentity =
        new PartitionKeyRangeIdentity(collectionRid, addresses.get(0).getParitionKeyRangeId());

    AddressInformation[] addressInformations = new AddressInformation[addresses.size()];
    for (int index = 0; index < addresses.size(); index++) {
        addressInformations[index] = GatewayAddressCache.toAddressInformation(addresses.get(index));
    }

    if (this.tcpConnectionEndpointRediscoveryEnabled) {
        for (AddressInformation addressInformation : addressInformations) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Added address to serverPartitionAddressToPkRangeIdMap: ({\"partitionKeyRangeIdentity\":{},\"address\":{}})",
                    rangeIdentity,
                    addressInformation);
            }

            // compute() keeps create-and-add atomic with respect to concurrent
            // removal in updateAddresses().
            this.serverPartitionAddressToPkRangeIdMap.compute(
                addressInformation.getServerKey(),
                (ignoredServerKey, existingSet) -> {
                    Set<PartitionKeyRangeIdentity> targetSet =
                        existingSet != null ? existingSet : ConcurrentHashMap.newKeySet();
                    targetSet.add(rangeIdentity);
                    return targetSet;
                });
        }
    }

    return Pair.of(rangeIdentity, addressInformations);
}
/** Wraps a gateway {@code Address} into an {@code AddressInformation} (first ctor flag is always true here). */
private static AddressInformation toAddressInformation(Address address) {
    final boolean isAddressReachable = true;
    return new AddressInformation(
        isAddressReachable,
        address.isPrimary(),
        address.getPhyicalUri(),
        address.getProtocolScheme());
}
/**
 * Warms up the address cache for the given partition key ranges of a collection and, when an
 * open-connections handler is configured, proactively opens connections to the resolved replicas.
 * Ranges are fetched from the gateway in batches of {@code DefaultBatchSize}.
 *
 * @param collection the target collection; must not be null.
 * @param partitionKeyRangeIdentities the ranges to warm up; must not be null.
 * @return a Flux of per-endpoint open-connection results (empty per batch when no handler is set).
 */
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(
DocumentCollection collection,
List<PartitionKeyRangeIdentity> partitionKeyRangeIdentities) {
checkNotNull(collection, "Argument 'collection' should not be null");
checkNotNull(partitionKeyRangeIdentities, "Argument 'partitionKeyRangeIdentities' should not be null");
if (logger.isDebugEnabled()) {
logger.debug(
"openConnectionsAndInitCaches collection: {}, partitionKeyRangeIdentities: {}",
collection.getResourceId(),
JavaStreamUtils.toString(partitionKeyRangeIdentities, ","));
}
// One gateway lookup task per batch of range ids; a single shared request object
// supplies headers/diagnostics for all batches.
List<Flux<List<Address>>> tasks = new ArrayList<>();
int batchSize = GatewayAddressCache.DefaultBatchSize;
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this.clientContext,
OperationType.Read,
collection.getResourceId(),
ResourceType.DocumentCollection,
Collections.emptyMap());
for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) {
int endIndex = i + batchSize;
endIndex = Math.min(endIndex, partitionKeyRangeIdentities.size());
tasks.add(
this.getServerAddressesViaGatewayWithRetry(
request,
collection.getResourceId(),
partitionKeyRangeIdentities
.subList(i, endIndex)
.stream()
.map(PartitionKeyRangeIdentity::getPartitionKeyRangeId)
.collect(Collectors.toList()),
false).flux());
}
// Batches are processed sequentially (concat). For each batch: keep only addresses of
// this cache's protocol, group them by partition key range, seed the cache, then open
// connections to the replicas if a handler is available.
return Flux.concat(tasks)
.flatMap(list -> {
List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> addressInfos =
list.stream()
.filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
.collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
.values()
.stream().map(addresses -> toPartitionAddressAndRange(collection.getResourceId(), addresses))
.collect(Collectors.toList());
return Flux.fromIterable(addressInfos)
.flatMap(addressInfo -> {
// set(...) seeds/overwrites the cache entry even before any caller asks for it.
this.serverPartitionAddressCache.set(addressInfo.getLeft(), addressInfo.getRight());
if (this.openConnectionsHandler != null) {
return this.openConnectionsHandler.openConnections(
Arrays
.stream(addressInfo.getRight())
.map(addressInformation -> addressInformation.getPhysicalUri())
.collect(Collectors.toList()));
}
logger.info("OpenConnectionHandler is null, can not open connections");
return Flux.empty();
});
});
}
/**
 * Gateway address lookup wrapped in the throttling-aware retry policy used during
 * cache warm-up ({@code openConnectionsAndInitCaches}).
 */
private Mono<List<Address>> getServerAddressesViaGatewayWithRetry(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {

    OpenConnectionAndInitCachesRetryPolicy retryPolicy =
        new OpenConnectionAndInitCachesRetryPolicy(this.connectionPolicy.getThrottlingRetryOptions());

    return BackoffRetryUtility.executeRetry(
        () -> this.getServerAddressesViaGatewayAsync(request, collectionRid, partitionKeyRangeIds, forceRefresh),
        retryPolicy);
}
/** True when fewer replicas were resolved than the system replication policy's maximum replica set size. */
private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) {
    int resolvedReplicaCount = addressInformations.length;
    return resolvedReplicaCount < ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize;
}
/**
 * Records the start of an address resolution in the request diagnostics.
 *
 * @return a correlation identifier for {@code logAddressResolutionEnd}, or null when the
 * request carries no diagnostics context.
 */
private static String logAddressResolutionStart(
    RxDocumentServiceRequest request,
    URI targetEndpointUrl,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {

    // Guard clause: without a diagnostics context there is nothing to correlate.
    if (request.requestContext.cosmosDiagnostics == null) {
        return null;
    }

    return BridgeInternal.recordAddressResolutionStart(
        request.requestContext.cosmosDiagnostics,
        targetEndpointUrl,
        forceRefresh,
        forceCollectionRoutingMapRefresh);
}
/** Records the end of an address resolution; no-op when the request has no diagnostics context. */
private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier, String errorMessage) {
    if (request.requestContext.cosmosDiagnostics == null) {
        return;
    }
    BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosDiagnostics, identifier, errorMessage);
}
/**
 * Per-collection bookkeeping of forced refreshes, used to decide when repeated
 * address-only refreshes should be escalated into a collection routing map refresh.
 */
private static class ForcedRefreshMetadata {
// Last time an address-only refresh was forced, per partition key range.
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> lastPartitionAddressOnlyRefresh;
// Last time a collection routing map refresh was forced.
// NOTE(review): field is mutated from multiple threads but is not volatile —
// readers may observe a stale value; confirm whether that is acceptable here.
private Instant lastCollectionRoutingMapRefresh;
public ForcedRefreshMetadata() {
lastPartitionAddressOnlyRefresh = new ConcurrentHashMap<>();
lastCollectionRoutingMapRefresh = Instant.now();
}
// Records a routing map refresh; when forcePartitionAddressRefresh is set the same
// instant is also recorded as an address refresh for the given range.
public void signalCollectionRoutingMapRefresh(
PartitionKeyRangeIdentity pk,
boolean forcePartitionAddressRefresh) {
Instant nowSnapshot = Instant.now();
if (forcePartitionAddressRefresh) {
lastPartitionAddressOnlyRefresh.put(pk, nowSnapshot);
}
lastCollectionRoutingMapRefresh = nowSnapshot;
}
// Records an address-only refresh for the given range.
public void signalPartitionAddressOnlyRefresh(PartitionKeyRangeIdentity pk) {
lastPartitionAddressOnlyRefresh.put(pk, Instant.now());
}
// Escalation test: true only when this range has been address-refreshed more recently
// than the last routing map refresh AND the routing map refresh is older than the
// minimum enforcement interval (30s) — i.e. address refreshes alone keep happening.
public boolean shouldIncludeCollectionRoutingMapRefresh(PartitionKeyRangeIdentity pk) {
Instant lastPartitionAddressRefreshSnapshot = lastPartitionAddressOnlyRefresh.get(pk);
Instant lastCollectionRoutingMapRefreshSnapshot = lastCollectionRoutingMapRefresh;
if (lastPartitionAddressRefreshSnapshot == null ||
!lastPartitionAddressRefreshSnapshot.isAfter(lastCollectionRoutingMapRefreshSnapshot)) {
return false;
}
Duration durationSinceLastForcedCollectionRoutingMapRefresh =
Duration.between(lastCollectionRoutingMapRefreshSnapshot, Instant.now());
boolean returnValue = durationSinceLastForcedCollectionRoutingMapRefresh
.compareTo(minDurationBeforeEnforcingCollectionRoutingMapRefresh) >= 0;
return returnValue;
}
}
} | class GatewayAddressCache implements IAddressCache {
// --- Constants ---
// Minimum age of the last forced routing map refresh before another one may be enforced.
private final static Duration minDurationBeforeEnforcingCollectionRoutingMapRefresh = Duration.ofSeconds(30);
private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class);
// Template for the gateway $filter expression selecting addresses of one protocol.
private final static String protocolFilterFormat = "%s eq %s";
// Number of partition key range ids per gateway lookup during warm-up.
private final static int DefaultBatchSize = 50;
// Default window (seconds) after which a suboptimal replica set triggers a forced refresh.
private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600;
// --- Immutable collaborators / configuration ---
private final DiagnosticsClientContext clientContext;
private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true);
private final URI addressEndpoint;
// Cache of resolved replica addresses per server partition key range.
private final AsyncCacheNonBlocking<PartitionKeyRangeIdentity, AddressInformation[]> serverPartitionAddressCache;
// When a range resolved to fewer replicas than expected, records when that was observed.
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> suboptimalServerPartitionTimestamps;
private final long suboptimalPartitionForceRefreshIntervalInSeconds;
private final String protocolScheme;
private final String protocolFilter;
private final IAuthorizationTokenProvider tokenProvider;
private final HashMap<String, String> defaultRequestHeaders;
private final HttpClient httpClient;
// --- Mutable state (volatile where read/written across threads) ---
// Cached master-partition addresses; null until first resolution or after invalidation.
private volatile Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterPartitionAddressCache;
// Instant.MAX acts as the "not suboptimal" sentinel.
private volatile Instant suboptimalMasterPartitionTimestamp;
// Reverse index: physical server -> partition key ranges it serves (for TCP endpoint rediscovery).
private final ConcurrentHashMap<URI, Set<PartitionKeyRangeIdentity>> serverPartitionAddressToPkRangeIdMap;
private final boolean tcpConnectionEndpointRediscoveryEnabled;
// Per-collection forced-refresh bookkeeping, keyed by collection rid.
private final ConcurrentHashMap<String, ForcedRefreshMetadata> lastForcedRefreshMap;
private final GlobalEndpointManager globalEndpointManager;
// Settable after construction via setOpenConnectionsHandler; may be null.
private IOpenConnectionsHandler openConnectionsHandler;
private final ConnectionPolicy connectionPolicy;
private final boolean replicaAddressValidationEnabled;
/**
 * Creates a gateway-backed address cache.
 *
 * @param serviceEndpoint the account endpoint; the //addresses/ path segment is appended to
 * derive the address resolution endpoint. An invalid endpoint is a programming error and
 * fails fast with IllegalStateException.
 * @param suboptimalPartitionForceRefreshIntervalInSeconds window after which a partition whose
 * replica set was incomplete is force-refreshed.
 * @param openConnectionsHandler optional handler used for proactive connection warm-up; may be null.
 */
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
long suboptimalPartitionForceRefreshIntervalInSeconds,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
this.clientContext = clientContext;
try {
this.addressEndpoint = new URL(serviceEndpoint.toURL(), Paths.ADDRESS_PATH_SEGMENT).toURI();
} catch (MalformedURLException | URISyntaxException e) {
// Misconfigured endpoint: log, trip asserts in debug builds, and fail construction.
logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e);
assert false;
throw new IllegalStateException(e);
}
this.tokenProvider = tokenProvider;
this.serverPartitionAddressCache = new AsyncCacheNonBlocking<>();
this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>();
// Instant.MAX = master partition currently not considered suboptimal.
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds;
this.protocolScheme = protocol.scheme();
this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat,
Constants.Properties.PROTOCOL,
this.protocolScheme);
this.httpClient = httpClient;
if (userAgent == null) {
userAgent = new UserAgentContainer();
}
// Headers sent with every address resolution request.
defaultRequestHeaders = new HashMap<>();
defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent());
if(apiType != null) {
defaultRequestHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
this.serverPartitionAddressToPkRangeIdMap = new ConcurrentHashMap<>();
this.tcpConnectionEndpointRediscoveryEnabled = tcpConnectionEndpointRediscoveryEnabled;
this.lastForcedRefreshMap = new ConcurrentHashMap<>();
this.globalEndpointManager = globalEndpointManager;
this.openConnectionsHandler = openConnectionsHandler;
this.connectionPolicy = connectionPolicy;
this.replicaAddressValidationEnabled = Configs.isReplicaAddressValidationEnabled();
}
/**
 * Convenience constructor using the default suboptimal-partition force-refresh
 * interval ({@code DefaultSuboptimalPartitionForceRefreshIntervalInSeconds}).
 */
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
this(clientContext,
serviceEndpoint,
protocol,
tokenProvider,
userAgent,
httpClient,
DefaultSuboptimalPartitionForceRefreshIntervalInSeconds,
tcpConnectionEndpointRediscoveryEnabled,
apiType,
globalEndpointManager,
connectionPolicy,
openConnectionsHandler);
}
/**
 * Invalidates every cached partition address entry served by the given physical server
 * (driven by TCP endpoint rediscovery).
 *
 * @param serverKey the physical server whose entries should be dropped; must not be null.
 * @return the number of cache entries invalidated (0 when rediscovery is disabled).
 */
@Override
public int updateAddresses(final URI serverKey) {
    Objects.requireNonNull(serverKey, "expected non-null serverKey");

    AtomicInteger removedEntryCount = new AtomicInteger(0);

    if (!this.tcpConnectionEndpointRediscoveryEnabled) {
        logger.warn("tcpConnectionEndpointRediscovery is not enabled, should not reach here.");
        return removedEntryCount.get();
    }

    // computeIfPresent keeps the whole invalidation atomic per server key; returning
    // null removes the reverse-index mapping itself.
    this.serverPartitionAddressToPkRangeIdMap.computeIfPresent(serverKey, (uri, pkRangeIdentities) -> {
        for (PartitionKeyRangeIdentity pkRangeIdentity : pkRangeIdentities) {
            if (PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID.equals(pkRangeIdentity.getPartitionKeyRangeId())) {
                this.masterPartitionAddressCache = null;
            } else {
                this.serverPartitionAddressCache.remove(pkRangeIdentity);
            }
            removedEntryCount.incrementAndGet();
        }
        return null;
    });

    return removedEntryCount.get();
}
/**
 * Resolves the replica addresses for a partition key range, serving from the async cache
 * when possible. Master-partition requests short-circuit to {@code resolveMasterAsync}.
 * A range whose last resolution returned an incomplete replica set is force-refreshed once
 * its suboptimal timestamp is older than the configured interval.
 *
 * @return a ValueHolder with the addresses; holder value is null when the range is gone/not found.
 */
@Override
public Mono<Utils.ValueHolder<AddressInformation[]>> tryGetAddresses(RxDocumentServiceRequest request,
PartitionKeyRangeIdentity partitionKeyRangeIdentity,
boolean forceRefreshPartitionAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", "");
logger.debug("PartitionKeyRangeIdentity {}, forceRefreshPartitionAddresses {}",
partitionKeyRangeIdentity,
forceRefreshPartitionAddresses);
// Master partition takes a dedicated resolution path (separate single-entry cache).
if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(),
PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties)
.map(partitionKeyRangeIdentityPair -> new Utils.ValueHolder<>(partitionKeyRangeIdentityPair.getRight()));
}
// May escalate this request to a collection routing map refresh based on forced-refresh history.
evaluateCollectionRoutingMapRefreshForServerPartition(
request, partitionKeyRangeIdentity, forceRefreshPartitionAddresses);
Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity);
if (suboptimalServerPartitionTimestamp != null) {
logger.debug("suboptimalServerPartitionTimestamp is {}", suboptimalServerPartitionTimestamp);
boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds()
> this.suboptimalPartitionForceRefreshIntervalInSeconds;
if (forceRefreshDueToSuboptimalPartitionReplicaSet) {
// Atomically claim the refresh: only the thread that swaps the old timestamp for
// Instant.MAX actually sets forceRefreshPartitionAddresses, so concurrent callers
// do not all force a refresh for the same stale observation.
Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity,
(key, oldVal) -> {
logger.debug("key = {}, oldValue = {}", key, oldVal);
if (suboptimalServerPartitionTimestamp.equals(oldVal)) {
return Instant.MAX;
} else {
return oldVal;
}
});
logger.debug("newValue is {}", newValue);
if (!suboptimalServerPartitionTimestamp.equals(newValue)) {
logger.debug("setting forceRefreshPartitionAddresses to true");
forceRefreshPartitionAddresses = true;
}
}
}
final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses;
if (forceRefreshPartitionAddressesModified) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
// getAsync(key, singleValueInitFunc, forceRefreshPredicate): the second lambda decides,
// given the cached value, whether a refresh is required; it also marks endpoints that
// failed on this request as unhealthy before deciding.
Mono<Utils.ValueHolder<AddressInformation[]>> addressesObs =
this.serverPartitionAddressCache
.getAsync(
partitionKeyRangeIdentity,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
forceRefreshPartitionAddressesModified,
cachedAddresses),
cachedAddresses -> {
for (Uri failedEndpoints : request.requestContext.getFailedEndpoints()) {
failedEndpoints.setUnhealthy();
}
return forceRefreshPartitionAddressesModified
|| Arrays.stream(cachedAddresses).anyMatch(addressInformation -> addressInformation.getPhysicalUri().shouldRefreshHealthStatus());
})
.map(Utils.ValueHolder::new);
return addressesObs
.map(addressesValueHolder -> {
// Record an incomplete replica set so a later call can force-refresh it.
if (notAllReplicasAvailable(addressesValueHolder.v)) {
if (logger.isDebugEnabled()) {
logger.debug("not all replicas available {}", JavaStreamUtils.info(addressesValueHolder.v));
}
this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now());
}
return addressesValueHolder;
})
.onErrorResume(ex -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(ex);
CosmosException dce = Utils.as(unwrappedException, CosmosException.class);
if (dce == null) {
logger.error("unexpected failure", ex);
if (forceRefreshPartitionAddressesModified) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
return Mono.error(unwrappedException);
} else {
logger.debug("tryGetAddresses dce", dce);
// NotFound / Gone / range-gone means the range no longer exists: surface a
// null-valued holder so the caller can re-resolve, rather than an error.
if (Exceptions.isNotFound(dce) ||
Exceptions.isGone(dce) ||
Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
logger.debug("tryGetAddresses: inner onErrorResumeNext return null", dce);
return Mono.just(new Utils.ValueHolder<>(null));
}
return Mono.error(unwrappedException);
}
});
}
/** Installs (or replaces) the handler used for proactive connection warm-up; may be set to null. */
@Override
public void setOpenConnectionsHandler(IOpenConnectionsHandler openConnectionsHandler) {
this.openConnectionsHandler = openConnectionsHandler;
}
/**
 * Fetches replica addresses for the given partition key ranges of a collection from the
 * gateway address endpoint. Builds the query (url, protocol filter, range ids), attaches
 * authorization (master-key style inline; AAD via async header population), records
 * address-lookup diagnostics, and maps transport failures to CosmosExceptions with
 * gateway-specific sub-status codes.
 */
public Mono<List<Address>> getServerAddressesViaGatewayAsync(
RxDocumentServiceRequest request,
String collectionRid,
List<String> partitionKeyRangeIds,
boolean forceRefresh) {
if (logger.isDebugEnabled()) {
logger.debug("getServerAddressesViaGatewayAsync collectionRid {}, partitionKeyRangeIds {}", collectionRid,
JavaStreamUtils.toString(partitionKeyRangeIds, ","));
}
request.setAddressRefresh(true, forceRefresh);
String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true);
HashMap<String, String> addressQuery = new HashMap<>();
addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
if (forceRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
}
if (request.forceCollectionRoutingMapRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
}
addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds));
headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Non-AAD auth: compute the token inline. Resource-token accounts may lack a token for
// the rid-based path, so fall back to the name-based collection link before giving up.
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
String token = null;
try {
token = this.tokenProvider.getUserAuthorizationToken(
collectionRid,
ResourceType.Document,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
request.properties);
} catch (UnauthorizedException e) {
// Deliberately swallowed: the name-based fallback below may still produce a token.
if (logger.isDebugEnabled()) {
logger.debug("User doesn't have resource token for collection rid {}", collectionRid);
}
}
if (token == null && request.getIsNameBased()) {
String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress());
token = this.tokenProvider.getUserAuthorizationToken(
collectionAltLink,
ResourceType.Document,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
request.properties);
}
token = HttpUtils.urlEncode(token);
headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
}
URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery));
String identifier = logAddressResolutionStart(
request, targetEndpoint, forceRefresh, request.forceCollectionRoutingMapRefresh);
HttpHeaders httpHeaders = new HttpHeaders(headers);
Instant addressCallStartTime = Instant.now();
HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders);
Mono<HttpResponse> httpResponseMono;
// AAD auth is asynchronous: the authorization header is populated before sending.
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
httpResponseMono = this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
} else {
httpResponseMono = tokenProvider
.populateAuthorizationHeader(httpHeaders)
.flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
}
Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, clientContext, httpResponseMono, httpRequest);
return dsrObs.map(
dsr -> {
// Attach address-lookup timing to the request diagnostics when available.
MetadataDiagnosticsContext metadataDiagnosticsContext =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (metadataDiagnosticsContext != null) {
Instant addressCallEndTime = Instant.now();
MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
addressCallEndTime,
MetadataType.SERVER_ADDRESS_LOOKUP);
metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
}
if (logger.isDebugEnabled()) {
logger.debug("getServerAddressesViaGatewayAsync deserializes result");
}
logAddressResolutionEnd(request, identifier, null);
return dsr.getQueryResponse(null, Address.class);
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
logAddressResolutionEnd(request, identifier, unwrappedException.toString());
// Non-Exception Throwables (Errors) are rethrown untouched.
if (!(unwrappedException instanceof Exception)) {
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
// Wrap raw network failures into a CosmosException with an approximated status code.
if (!(exception instanceof CosmosException)) {
logger.error("Network failure", exception);
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(
request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
// Tag gateway-endpoint sub-status so retry policies can distinguish the failure mode.
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
}
return Mono.error(dce);
});
}
// Intentionally empty — NOTE(review): no resources are released here; confirm that the
// shared HttpClient and caches are owned/closed elsewhere.
public void dispose() {
}
/**
 * Resolves the master partition's replica addresses, serving from the single-entry
 * {@code masterPartitionAddressCache} unless a refresh is forced — either explicitly or
 * because the cached replica set has been incomplete for longer than the configured interval.
 */
private Mono<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map<String, Object> properties) {
logger.debug("resolveMasterAsync forceRefresh: {}", forceRefresh);
// Snapshot the volatile once so the decision below is based on a consistent value.
Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeInitial = this.masterPartitionAddressCache;
// Also force a refresh when the cached replica set is incomplete and the suboptimal
// timestamp is older than the refresh interval.
forceRefresh = forceRefresh ||
(masterAddressAndRangeInitial != null &&
notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) &&
Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds);
if (forceRefresh || this.masterPartitionAddressCache == null) {
Mono<List<Address>> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync(
request,
ResourceType.Database,
null,
databaseFeedEntryUrl,
forceRefresh,
false,
properties);
return masterReplicaAddressesObs.map(
masterAddresses -> {
Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeRes =
this.toPartitionAddressAndRange("", masterAddresses);
this.masterPartitionAddressCache = masterAddressAndRangeRes;
// Start (or clear) the suboptimal clock depending on whether the fresh
// result still has an incomplete replica set.
if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight())
&& this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
this.suboptimalMasterPartitionTimestamp = Instant.now();
} else {
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
}
return masterPartitionAddressCache;
})
.doOnError(
e -> {
// Reset to the "not suboptimal" sentinel on failure.
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
});
} else {
// Serving from cache: still start the suboptimal clock if the cached set is incomplete.
if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight())
&& this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
this.suboptimalMasterPartitionTimestamp = Instant.now();
}
return Mono.just(masterAddressAndRangeInitial);
}
}
/**
 * Tracks forced refreshes for the range's collection and, when address-only refreshes keep
 * recurring without a routing map refresh, escalates this request by setting
 * {@code request.forceCollectionRoutingMapRefresh}.
 */
private void evaluateCollectionRoutingMapRefreshForServerPartition(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefreshPartitionAddresses) {

    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);

    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();

    // Bookkeeping only applies when some kind of forced refresh is in play.
    if (forceRefreshPartitionAddresses || request.forceCollectionRoutingMapRefresh) {
        ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
            collectionRid,
            (colRid) -> new ForcedRefreshMetadata());

        if (!forceRefreshPartitionAddresses) {
            // Caller asked only for a collection routing map refresh.
            forcedRefreshMetadata.signalCollectionRoutingMapRefresh(pkRangeIdentity, false);
        } else if (request.forceCollectionRoutingMapRefresh) {
            forcedRefreshMetadata.signalCollectionRoutingMapRefresh(pkRangeIdentity, true);
        } else if (forcedRefreshMetadata.shouldIncludeCollectionRoutingMapRefresh(pkRangeIdentity)) {
            // Escalate: repeated address-only refreshes suggest the routing map itself is stale.
            request.forceCollectionRoutingMapRefresh = true;
            forcedRefreshMetadata.signalCollectionRoutingMapRefresh(pkRangeIdentity, true);
        } else {
            forcedRefreshMetadata.signalPartitionAddressOnlyRefresh(pkRangeIdentity);
        }
    }

    logger.debug("evaluateCollectionRoutingMapRefreshForServerPartition collectionRid {}, partitionKeyRangeId {},"
        + " " +
        "forceRefreshPartitionAddresses {}, forceCollectionRoutingMapRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefreshPartitionAddresses,
        request.forceCollectionRoutingMapRefresh);
}
/** Fails fast when the identity or any of its required components is null. */
private void validatePkRangeIdentity(PartitionKeyRangeIdentity pkRangeIdentity) {
    Utils.checkNotNullOrThrow(pkRangeIdentity, "pkRangeId", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getCollectionRid(), "pkRangeId.getCollectionRid()", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getPartitionKeyRangeId(), "pkRangeId.getPartitionKeyRangeId()", "");
}
/**
 * Resolves the replica addresses of a single partition key range via the gateway and merges
 * them with the currently cached addresses (cached entries are reused when the gateway
 * returns the same physical address again).
 *
 * @param request the request providing auth/diagnostics context.
 * @param pkRangeIdentity collection rid + partition key range id to resolve.
 * @param forceRefresh whether to request a forced refresh from the gateway.
 * @param cachedAddresses previously cached addresses to merge with the response.
 * @return the merged addresses; errors with {@code PartitionKeyRangeGoneException} when the
 * gateway no longer returns the requested range.
 */
private Mono<AddressInformation[]> getAddressesForRangeId(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefresh,
    AddressInformation[] cachedAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);

    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();

    logger.debug(
        "getAddressesForRangeId collectionRid {}, partitionKeyRangeId {}, forceRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefresh);

    Mono<List<Address>> addressResponse = this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh);

    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> addressInfos =
        addressResponse.map(
            addresses -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("addresses from getServerAddressesViaGatewayAsync in getAddressesForRangeId {}",
                        JavaStreamUtils.info(addresses));
                }
                return addresses
                    .stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream()
                    // BUGFIX: build each pair from that group's own addresses. The previous code
                    // passed the full pre-filter 'addresses' list, so every pair contained
                    // addresses of all ranges/protocols, defeating both the protocol filter and
                    // the grouping (cf. the correct usage in openConnectionsAndInitCaches).
                    .map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, groupedAddresses))
                    .collect(Collectors.toList());
            });

    // Keep only the pair matching the requested range id.
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> result =
        addressInfos
            .map(addressInfo -> addressInfo.stream()
                .filter(a -> StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId))
                .collect(Collectors.toList()));

    return result
        .flatMap(
            list -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("getAddressesForRangeId flatMap got result {}", JavaStreamUtils.info(list));
                }

                if (list.isEmpty()) {
                    // The gateway no longer knows this range: signal range-gone so the
                    // caller re-resolves the routing map.
                    String errorMessage = String.format(
                        RMResources.PartitionKeyRangeNotFound,
                        partitionKeyRangeId,
                        collectionRid);

                    PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage);
                    BridgeInternal.setResourceAddress(e, collectionRid);
                    return Mono.error(e);
                } else {
                    AddressInformation[] mergedAddresses = this.mergeAddresses(list.get(0).getRight(), cachedAddresses);
                    for (AddressInformation address : mergedAddresses) {
                        address.getPhysicalUri().setRefreshed();
                    }

                    if (this.replicaAddressValidationEnabled) {
                        this.validateReplicaAddresses(mergedAddresses);
                    }

                    return Mono.just(mergedAddresses);
                }
            })
        .doOnError(e -> logger.debug("getAddressesForRangeId", e));
}
/**
 * Fetches the master partition's replica addresses from the gateway address endpoint.
 * Mirrors {@code getServerAddressesViaGatewayAsync} but targets the database feed entry url
 * and records MASTER_ADDRESS_LOOK_UP diagnostics.
 */
public Mono<List<Address>> getMasterAddressesViaGatewayAsync(
RxDocumentServiceRequest request,
ResourceType resourceType,
String resourceAddress,
String entryUrl,
boolean forceRefresh,
boolean useMasterCollectionResolver,
Map<String, Object> properties) {
logger.debug("getMasterAddressesViaGatewayAsync " +
"resourceType {}, " +
"resourceAddress {}, " +
"entryUrl {}, " +
"forceRefresh {}, " +
"useMasterCollectionResolver {}",
resourceType,
resourceAddress,
entryUrl,
forceRefresh,
useMasterCollectionResolver
);
request.setAddressRefresh(true, forceRefresh);
HashMap<String, String> queryParameters = new HashMap<>();
queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
if (forceRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
}
if (useMasterCollectionResolver) {
headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, "true");
}
if(request.forceCollectionRoutingMapRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
}
queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Non-AAD auth: compute the token inline; AAD populates the header asynchronously below.
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
String token = this.tokenProvider.getUserAuthorizationToken(
resourceAddress,
resourceType,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
properties);
headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token));
}
URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters));
// NOTE(review): diagnostics are started with hard-coded (true, true) rather than the
// actual forceRefresh / forceCollectionRoutingMapRefresh flags — confirm intended
// (getServerAddressesViaGatewayAsync passes the real flags).
String identifier = logAddressResolutionStart(
request, targetEndpoint, true, true);
HttpHeaders defaultHttpHeaders = new HttpHeaders(headers);
HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), defaultHttpHeaders);
Instant addressCallStartTime = Instant.now();
Mono<HttpResponse> httpResponseMono;
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
httpResponseMono = this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
} else {
httpResponseMono = tokenProvider
.populateAuthorizationHeader(defaultHttpHeaders)
.flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
}
Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, this.clientContext, httpResponseMono, httpRequest);
return dsrObs.map(
dsr -> {
// Attach master-address-lookup timing to diagnostics when available.
MetadataDiagnosticsContext metadataDiagnosticsContext =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (metadataDiagnosticsContext != null) {
Instant addressCallEndTime = Instant.now();
MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
addressCallEndTime,
MetadataType.MASTER_ADDRESS_LOOK_UP);
metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
}
logAddressResolutionEnd(request, identifier, null);
return dsr.getQueryResponse(null, Address.class);
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
logAddressResolutionEnd(request, identifier, unwrappedException.toString());
// Non-Exception Throwables (Errors) are rethrown untouched.
if (!(unwrappedException instanceof Exception)) {
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
// Wrap raw network failures into a CosmosException with an approximated status code.
if (!(exception instanceof CosmosException)) {
logger.error("Network failure", exception);
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(
request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
// Tag gateway-endpoint sub-status so retry policies can distinguish the failure mode.
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
}
return Mono.error(dce);
});
}
/**
 * Proactively validates replica addresses whose health status is {@code UnhealthyPending}
 * by asking the open-connections handler to re-establish connections to them.
 * (The previous Javadoc here described {@code mergeAddresses}; it was misplaced.)
 *
 * @param addresses the freshly merged replica addresses for a partition; must not be null.
 */
private void validateReplicaAddresses(AddressInformation[] addresses) {
    checkNotNull(addresses, "Argument 'addresses' can not be null");

    // Only addresses previously flagged as unhealthy-pending need re-validation.
    List<Uri> addressesNeedToValidation =
        Arrays
            .stream(addresses)
            .map(AddressInformation::getPhysicalUri)
            .filter(addressUri -> addressUri.getHealthStatus() == Uri.HealthStatus.UnhealthyPending)
            .collect(Collectors.toList());

    if (!addressesNeedToValidation.isEmpty()) {
        if (this.openConnectionsHandler == null) {
            // Guard consistent with openConnectionsAndInitCaches: the handler is wired late
            // via setOpenConnectionsHandler and may not be present yet.
            logger.info("OpenConnectionHandler is null, can not validate replica addresses");
            return;
        }
        this.openConnectionsHandler
            .openConnections(addressesNeedToValidation)
            .subscribeOn(CosmosSchedulers.OPEN_CONNECTIONS_BOUNDED_ELASTIC)
            .subscribe();
    }
}
/**
 * Converts one partition's gateway Address list into a (range identity, address array) pair,
 * and — when TCP endpoint rediscovery is on — records which ranges live on each server endpoint.
 */
private Pair<PartitionKeyRangeIdentity, AddressInformation[]> toPartitionAddressAndRange(String collectionRid, List<Address> addresses) {
    if (logger.isDebugEnabled()) {
        logger.debug("toPartitionAddressAndRange");
    }

    // Every address in this list belongs to the same range; take the id from the first entry.
    PartitionKeyRangeIdentity partitionKeyRangeIdentity =
        new PartitionKeyRangeIdentity(collectionRid, addresses.get(0).getParitionKeyRangeId());

    AddressInformation[] addressInformations =
        addresses
            .stream()
            .map(GatewayAddressCache::toAddressInformation)
            .toArray(AddressInformation[]::new);

    if (this.tcpConnectionEndpointRediscoveryEnabled) {
        for (AddressInformation addressInformation : addressInformations) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Added address to serverPartitionAddressToPkRangeIdMap: ({\"partitionKeyRangeIdentity\":{},\"address\":{}})",
                    partitionKeyRangeIdentity,
                    addressInformation);
            }

            // compute() mutates the per-server set atomically; a null existing set means first sighting.
            this.serverPartitionAddressToPkRangeIdMap.compute(addressInformation.getServerKey(), (serverKey, pkRangeIdentities) -> {
                Set<PartitionKeyRangeIdentity> updated =
                    pkRangeIdentities == null ? ConcurrentHashMap.newKeySet() : pkRangeIdentities;
                updated.add(partitionKeyRangeIdentity);
                return updated;
            });
        }
    }

    return Pair.of(partitionKeyRangeIdentity, addressInformations);
}
// Adapts a gateway Address payload into the transport-level AddressInformation.
// The first argument ("isPublic") is always true for gateway-resolved addresses here.
// NOTE: getPhyicalUri is the upstream API's own spelling.
private static AddressInformation toAddressInformation(Address address) {
    final boolean isPublic = true;
    return new AddressInformation(isPublic, address.isPrimary(), address.getPhyicalUri(), address.getProtocolScheme());
}
/**
 * Warms up address caches and proactively opens connections for the given partition key ranges
 * of a collection. Ranges are resolved in batches of {@code DefaultBatchSize} via the gateway
 * (with retry), grouped per range, seeded into {@code serverPartitionAddressCache}, and — when an
 * open-connections handler is wired — connections are opened to every resolved replica.
 */
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(
DocumentCollection collection,
List<PartitionKeyRangeIdentity> partitionKeyRangeIdentities) {
checkNotNull(collection, "Argument 'collection' should not be null");
checkNotNull(partitionKeyRangeIdentities, "Argument 'partitionKeyRangeIdentities' should not be null");
if (logger.isDebugEnabled()) {
logger.debug(
"openConnectionsAndInitCaches collection: {}, partitionKeyRangeIdentities: {}",
collection.getResourceId(),
JavaStreamUtils.toString(partitionKeyRangeIdentities, ","));
}
List<Flux<List<Address>>> tasks = new ArrayList<>();
int batchSize = GatewayAddressCache.DefaultBatchSize;
// One synthetic read request is reused for all address-resolution batches.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this.clientContext,
OperationType.Read,
collection.getResourceId(),
ResourceType.DocumentCollection,
Collections.emptyMap());
// Slice the range identities into batches so each gateway call stays bounded.
for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) {
int endIndex = i + batchSize;
endIndex = Math.min(endIndex, partitionKeyRangeIdentities.size());
tasks.add(
this.getServerAddressesViaGatewayWithRetry(
request,
collection.getResourceId(),
partitionKeyRangeIdentities
.subList(i, endIndex)
.stream()
.map(PartitionKeyRangeIdentity::getPartitionKeyRangeId)
.collect(Collectors.toList()),
false).flux());
}
return Flux.concat(tasks)
.flatMap(list -> {
// Keep only addresses for our transport protocol, then regroup the flat
// response by partition key range before caching.
List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> addressInfos =
list.stream()
.filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
.collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
.values()
.stream().map(addresses -> toPartitionAddressAndRange(collection.getResourceId(), addresses))
.collect(Collectors.toList());
return Flux.fromIterable(addressInfos)
.flatMap(addressInfo -> {
// Seed the cache regardless of whether connections can be opened.
this.serverPartitionAddressCache.set(addressInfo.getLeft(), addressInfo.getRight());
if (this.openConnectionsHandler != null) {
return this.openConnectionsHandler.openConnections(
Arrays
.stream(addressInfo.getRight())
.map(addressInformation -> addressInformation.getPhysicalUri())
.collect(Collectors.toList()));
}
logger.info("OpenConnectionHandler is null, can not open connections");
return Flux.empty();
});
});
}
/**
 * Same as {@code getServerAddressesViaGatewayAsync} but retried with the client's
 * throttling retry options (used by the warm-up / openConnections path).
 */
private Mono<List<Address>> getServerAddressesViaGatewayWithRetry(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {

    // A fresh retry policy per invocation keeps retry state isolated between calls.
    return BackoffRetryUtility.executeRetry(
        () -> this.getServerAddressesViaGatewayAsync(request, collectionRid, partitionKeyRangeIds, forceRefresh),
        new OpenConnectionAndInitCachesRetryPolicy(this.connectionPolicy.getThrottlingRetryOptions()));
}
// True when fewer replicas were resolved than the system replication policy's max replica
// set size, i.e. the address set is suboptimal and worth refreshing later.
private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) {
    final int resolvedReplicaCount = addressInformations.length;
    return resolvedReplicaCount < ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize;
}
/**
 * Records the start of an address-resolution call in the request diagnostics.
 *
 * @return an identifier to pass to {@code logAddressResolutionEnd}, or null when the
 *         request carries no diagnostics context.
 */
private static String logAddressResolutionStart(
    RxDocumentServiceRequest request,
    URI targetEndpointUrl,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {

    // Guard clause: nothing to record when diagnostics are disabled for this request.
    if (request.requestContext.cosmosDiagnostics == null) {
        return null;
    }

    return BridgeInternal.recordAddressResolutionStart(
        request.requestContext.cosmosDiagnostics,
        targetEndpointUrl,
        forceRefresh,
        forceCollectionRoutingMapRefresh);
}
// Closes out the diagnostics entry opened by logAddressResolutionStart; errorMessage is
// null on success. No-op when the request carries no diagnostics context.
private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier, String errorMessage) {
    if (request.requestContext.cosmosDiagnostics == null) {
        return;
    }
    BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosDiagnostics, identifier, errorMessage);
}
/**
 * Per-collection bookkeeping of forced refreshes, used to decide when a chain of
 * partition-address-only refreshes should be escalated into a collection routing map refresh.
 * Accessed concurrently by request threads (instances live in a ConcurrentHashMap).
 */
private static class ForcedRefreshMetadata {
    private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> lastPartitionAddressOnlyRefresh;
    // FIX: volatile — this field is written and read from multiple request threads without
    // synchronization; volatile guarantees visibility of the latest timestamp (matching the
    // volatile usage on the outer class's concurrently-accessed fields).
    private volatile Instant lastCollectionRoutingMapRefresh;

    public ForcedRefreshMetadata() {
        lastPartitionAddressOnlyRefresh = new ConcurrentHashMap<>();
        lastCollectionRoutingMapRefresh = Instant.now();
    }

    /** Records a routing map refresh; optionally also counts as a partition address refresh. */
    public void signalCollectionRoutingMapRefresh(
        PartitionKeyRangeIdentity pk,
        boolean forcePartitionAddressRefresh) {

        Instant nowSnapshot = Instant.now();
        if (forcePartitionAddressRefresh) {
            lastPartitionAddressOnlyRefresh.put(pk, nowSnapshot);
        }
        lastCollectionRoutingMapRefresh = nowSnapshot;
    }

    /** Records that only the partition addresses (not the routing map) were force-refreshed. */
    public void signalPartitionAddressOnlyRefresh(PartitionKeyRangeIdentity pk) {
        lastPartitionAddressOnlyRefresh.put(pk, Instant.now());
    }

    /**
     * True when this partition was force-refreshed after the last routing map refresh AND
     * the last routing map refresh is older than the enforced minimum interval — i.e. the
     * next forced address refresh should also refresh the routing map.
     */
    public boolean shouldIncludeCollectionRoutingMapRefresh(PartitionKeyRangeIdentity pk) {
        Instant lastPartitionAddressRefreshSnapshot = lastPartitionAddressOnlyRefresh.get(pk);
        // Snapshot the volatile once so both checks below see a consistent value.
        Instant lastCollectionRoutingMapRefreshSnapshot = lastCollectionRoutingMapRefresh;

        // No address-only refresh since the last routing map refresh -> nothing to escalate.
        if (lastPartitionAddressRefreshSnapshot == null ||
            !lastPartitionAddressRefreshSnapshot.isAfter(lastCollectionRoutingMapRefreshSnapshot)) {
            return false;
        }

        Duration durationSinceLastForcedCollectionRoutingMapRefresh =
            Duration.between(lastCollectionRoutingMapRefreshSnapshot, Instant.now());

        // Throttle escalation: at most once per minDurationBeforeEnforcingCollectionRoutingMapRefresh.
        return durationSinceLastForcedCollectionRoutingMapRefresh
            .compareTo(minDurationBeforeEnforcingCollectionRoutingMapRefresh) >= 0;
    }
}
} |
we also want to remove the addresses not returned in the new addresses | private AddressInformation[] mergeAddresses(AddressInformation[] newAddresses, AddressInformation[] cachedAddresses) {
checkNotNull(newAddresses, "Argument 'newAddresses' should not be null");
if (cachedAddresses == null) {
return newAddresses;
}
List<AddressInformation> mergedAddresses = new ArrayList<>();
Map<Uri, AddressInformation> cachedAddressMap =
Arrays
.stream(cachedAddresses)
.collect(Collectors.toMap(address -> address.getPhysicalUri(), address -> address));
for (AddressInformation addressInformation : newAddresses) {
if (cachedAddressMap.containsKey(addressInformation.getPhysicalUri())) {
mergedAddresses.add(cachedAddressMap.get(addressInformation.getPhysicalUri()));
} else {
mergedAddresses.add(addressInformation);
}
}
return mergedAddresses.toArray(new AddressInformation[mergedAddresses.size()]);
} | return mergedAddresses.toArray(new AddressInformation[mergedAddresses.size()]); | private AddressInformation[] mergeAddresses(AddressInformation[] newAddresses, AddressInformation[] cachedAddresses) {
checkNotNull(newAddresses, "Argument 'newAddresses' should not be null");
if (cachedAddresses == null) {
return newAddresses;
}
List<AddressInformation> mergedAddresses = new ArrayList<>();
Map<Uri, List<AddressInformation>> cachedAddressMap =
Arrays
.stream(cachedAddresses)
.collect(Collectors.groupingBy(AddressInformation::getPhysicalUri));
for (AddressInformation newAddress : newAddresses) {
boolean useCachedAddress = false;
if (cachedAddressMap.containsKey(newAddress.getPhysicalUri())) {
for (AddressInformation cachedAddress : cachedAddressMap.get(newAddress.getPhysicalUri())) {
if (newAddress.getProtocol() == cachedAddress.getProtocol()
&& newAddress.isPublic() == cachedAddress.isPublic()
&& newAddress.isPrimary() == cachedAddress.isPrimary()) {
useCachedAddress = true;
mergedAddresses.add(cachedAddress);
break;
}
}
}
if (!useCachedAddress) {
mergedAddresses.add(newAddress);
}
}
return mergedAddresses.toArray(new AddressInformation[mergedAddresses.size()]);
} | class GatewayAddressCache implements IAddressCache {
// Minimum spacing between forced collection routing map refreshes (see ForcedRefreshMetadata).
private final static Duration minDurationBeforeEnforcingCollectionRoutingMapRefresh = Duration.ofSeconds(30);
private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class);
// OData-style filter template; instantiated with (Constants.Properties.PROTOCOL, scheme).
private final static String protocolFilterFormat = "%s eq %s";
// Batch size for pkRange-id batches in openConnectionsAndInitCaches.
private final static int DefaultBatchSize = 50;
private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600;
private final DiagnosticsClientContext clientContext;
private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true);
// Gateway endpoint used for all address lookups (serviceEndpoint + address path segment).
private final URI addressEndpoint;
// Cache of resolved replica addresses per partition key range.
private final AsyncCache<PartitionKeyRangeIdentity, AddressInformation[]> serverPartitionAddressCache;
// When a range resolved fewer replicas than expected, records when that was first observed.
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> suboptimalServerPartitionTimestamps;
private final long suboptimalPartitionForceRefreshIntervalInSeconds;
private final String protocolScheme;
private final String protocolFilter;
private final IAuthorizationTokenProvider tokenProvider;
private final HashMap<String, String> defaultRequestHeaders;
private final HttpClient httpClient;
// volatile: master address pair is swapped/cleared from multiple threads.
private volatile Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterPartitionAddressCache;
private volatile Instant suboptimalMasterPartitionTimestamp;
// Reverse index: server endpoint -> ranges hosted there; used by updateAddresses eviction.
private final ConcurrentHashMap<URI, Set<PartitionKeyRangeIdentity>> serverPartitionAddressToPkRangeIdMap;
private final boolean tcpConnectionEndpointRediscoveryEnabled;
// Per-collectionRid forced-refresh bookkeeping (see ForcedRefreshMetadata).
private final ConcurrentHashMap<String, ForcedRefreshMetadata> lastForcedRefreshMap;
private final GlobalEndpointManager globalEndpointManager;
// Not final: wired late via setOpenConnectionsHandler; may be null.
private IOpenConnectionsHandler openConnectionsHandler;
private final ConnectionPolicy connectionPolicy;
private final boolean replicaAddressValidationEnabled;
/**
 * Full constructor.
 *
 * @param suboptimalPartitionForceRefreshIntervalInSeconds how long a suboptimal (under-replicated)
 *        address set may be served before a forced refresh is triggered.
 * @throws IllegalStateException when serviceEndpoint cannot be combined with the address path segment.
 */
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
long suboptimalPartitionForceRefreshIntervalInSeconds,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
this.clientContext = clientContext;
try {
// Address lookups always go to <serviceEndpoint>/<ADDRESS_PATH_SEGMENT>.
this.addressEndpoint = new URL(serviceEndpoint.toURL(), Paths.ADDRESS_PATH_SEGMENT).toURI();
} catch (MalformedURLException | URISyntaxException e) {
logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e);
assert false;
throw new IllegalStateException(e);
}
this.tokenProvider = tokenProvider;
this.serverPartitionAddressCache = new AsyncCache<>();
this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>();
// Instant.MAX means "no suboptimal master partition observed yet".
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds;
this.protocolScheme = protocol.scheme();
this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat,
Constants.Properties.PROTOCOL,
this.protocolScheme);
this.httpClient = httpClient;
if (userAgent == null) {
userAgent = new UserAgentContainer();
}
// Headers sent on every address-resolution request.
defaultRequestHeaders = new HashMap<>();
defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent());
if(apiType != null) {
defaultRequestHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
this.serverPartitionAddressToPkRangeIdMap = new ConcurrentHashMap<>();
this.tcpConnectionEndpointRediscoveryEnabled = tcpConnectionEndpointRediscoveryEnabled;
this.lastForcedRefreshMap = new ConcurrentHashMap<>();
this.globalEndpointManager = globalEndpointManager;
this.openConnectionsHandler = openConnectionsHandler;
this.connectionPolicy = connectionPolicy;
this.replicaAddressValidationEnabled = Configs.isReplicaAddressValidationEnabled();
}
/**
 * Convenience constructor using the default suboptimal-partition force-refresh interval
 * ({@code DefaultSuboptimalPartitionForceRefreshIntervalInSeconds}).
 */
public GatewayAddressCache(
DiagnosticsClientContext clientContext,
URI serviceEndpoint,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
UserAgentContainer userAgent,
HttpClient httpClient,
boolean tcpConnectionEndpointRediscoveryEnabled,
ApiType apiType,
GlobalEndpointManager globalEndpointManager,
ConnectionPolicy connectionPolicy,
IOpenConnectionsHandler openConnectionsHandler) {
this(clientContext,
serviceEndpoint,
protocol,
tokenProvider,
userAgent,
httpClient,
DefaultSuboptimalPartitionForceRefreshIntervalInSeconds,
tcpConnectionEndpointRediscoveryEnabled,
apiType,
globalEndpointManager,
connectionPolicy,
openConnectionsHandler);
}
/**
 * Evicts every cached partition address entry hosted on the given server endpoint
 * (used by TCP connection-endpoint rediscovery when a server becomes unreachable).
 *
 * @param serverKey the server endpoint whose cached entries should be invalidated.
 * @return the number of cache entries evicted.
 */
@Override
public int updateAddresses(final URI serverKey) {
    Objects.requireNonNull(serverKey, "expected non-null serverKey");

    final AtomicInteger evictedEntryCount = new AtomicInteger(0);

    if (!this.tcpConnectionEndpointRediscoveryEnabled) {
        logger.warn("tcpConnectionEndpointRediscovery is not enabled, should not reach here.");
        return evictedEntryCount.get();
    }

    // Returning null from the remapping function atomically removes the serverKey entry
    // after all its ranges have been evicted from the address caches.
    this.serverPartitionAddressToPkRangeIdMap.computeIfPresent(serverKey, (uri, pkRangeIdentities) -> {
        for (PartitionKeyRangeIdentity pkRangeIdentity : pkRangeIdentities) {
            if (pkRangeIdentity.getPartitionKeyRangeId().equals(PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
                this.masterPartitionAddressCache = null;
            } else {
                this.serverPartitionAddressCache.remove(pkRangeIdentity);
            }
            evictedEntryCount.incrementAndGet();
        }
        return null;
    });

    return evictedEntryCount.get();
}
/**
 * Resolves replica addresses for a partition key range, serving from cache where possible.
 * Master-range requests are delegated to resolveMasterAsync; server ranges go through
 * serverPartitionAddressCache with optional forced refresh. Suboptimal (under-replicated)
 * entries are tracked and eventually force-refreshed. On NOTFOUND/GONE/range-gone errors a
 * null-valued holder is returned so callers can refresh their routing information.
 */
@Override
public Mono<Utils.ValueHolder<AddressInformation[]>> tryGetAddresses(RxDocumentServiceRequest request,
PartitionKeyRangeIdentity partitionKeyRangeIdentity,
boolean forceRefreshPartitionAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", "");
logger.debug("PartitionKeyRangeIdentity {}, forceRefreshPartitionAddresses {}",
partitionKeyRangeIdentity,
forceRefreshPartitionAddresses);
// Master partition has its own single-entry cache and resolution path.
if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(),
PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties)
.map(partitionKeyRangeIdentityPair -> new Utils.ValueHolder<>(partitionKeyRangeIdentityPair.getRight()));
}
// Possibly escalate this refresh into a collection routing map refresh (see ForcedRefreshMetadata).
evaluateCollectionRoutingMapRefreshForServerPartition(
request, partitionKeyRangeIdentity, forceRefreshPartitionAddresses);
Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity);
if (suboptimalServerPartitionTimestamp != null) {
logger.debug("suboptimalServerPartitionTimestamp is {}", suboptimalServerPartitionTimestamp);
// A suboptimal entry older than the configured interval forces a refresh.
boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds()
> this.suboptimalPartitionForceRefreshIntervalInSeconds;
if (forceRefreshDueToSuboptimalPartitionReplicaSet) {
// CAS-like update: only the thread that swaps the old timestamp to Instant.MAX
// actually performs the forced refresh; concurrent callers see newValue unchanged.
Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity,
(key, oldVal) -> {
logger.debug("key = {}, oldValue = {}", key, oldVal);
if (suboptimalServerPartitionTimestamp.equals(oldVal)) {
return Instant.MAX;
} else {
return oldVal;
}
});
logger.debug("newValue is {}", newValue);
if (!suboptimalServerPartitionTimestamp.equals(newValue)) {
logger.debug("setting forceRefreshPartitionAddresses to true");
forceRefreshPartitionAddresses = true;
}
}
}
final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses;
if (forceRefreshPartitionAddressesModified) {
logger.debug("refresh serverPartitionAddressCache for {}", partitionKeyRangeIdentity);
// Mark endpoints that already failed for this request so the merge keeps their state.
for (Uri uri : request.requestContext.getFailedEndpoints()) {
uri.setUnhealthy();
}
this.serverPartitionAddressCache.refreshWithInitFunction(
partitionKeyRangeIdentity,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
true,
cachedAddresses));
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
Mono<Utils.ValueHolder<AddressInformation[]>> addressesObs =
this.serverPartitionAddressCache
.getAsyncWithInitFunction(
partitionKeyRangeIdentity,
null,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
false,
cachedAddresses))
.map(Utils.ValueHolder::new);
return addressesObs
.map(
addressesValueHolder -> {
// Track under-replicated results for later forced refresh.
if (notAllReplicasAvailable(addressesValueHolder.v)) {
if (logger.isDebugEnabled()) {
logger.debug("not all replicas available {}", JavaStreamUtils.info(addressesValueHolder.v));
}
this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now());
}
// Kick off a background refresh when any replica Uri reports unhealthy status;
// the current (possibly stale) result is still returned to the caller.
if (Arrays
.stream(addressesValueHolder.v)
.anyMatch(addressInformation -> addressInformation.getPhysicalUri().shouldRefreshHealthStatus())) {
logger.info("refresh cache due to address uri in unhealthy status");
this.serverPartitionAddressCache.refreshWithInitFunction(
partitionKeyRangeIdentity,
cachedAddresses -> this.getAddressesForRangeId(
request,
partitionKeyRangeIdentity,
true,
cachedAddresses));
}
return addressesValueHolder;
})
.onErrorResume(ex -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(ex);
CosmosException dce = Utils.as(unwrappedException, CosmosException.class);
if (dce == null) {
logger.error("unexpected failure", ex);
if (forceRefreshPartitionAddressesModified) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
}
return Mono.error(unwrappedException);
} else {
logger.debug("tryGetAddresses dce", dce);
// Range no longer exists (split/merge): surface null so callers refresh routing.
if (Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.NOTFOUND) ||
Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.GONE) ||
Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) {
this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
logger.debug("tryGetAddresses: inner onErrorResumeNext return null", dce);
return Mono.just(new Utils.ValueHolder<>(null));
}
return Mono.error(unwrappedException);
}
});
}
// Late-binding setter: the transport layer wires in the connection-opening callback after
// construction; until then openConnectionsHandler may be null.
@Override
public void setOpenConnectionsHandler(IOpenConnectionsHandler openConnectionsHandler) {
this.openConnectionsHandler = openConnectionsHandler;
}
/**
 * Asks the gateway for the replica addresses of the given partition key ranges of a collection.
 * Builds the address query (url/filter/pkrange ids), attaches auth (master-key style inline, or
 * AAD via populateAuthorizationHeader), records metadata diagnostics, and maps network failures
 * onto CosmosException with gateway-specific sub-status codes.
 */
public Mono<List<Address>> getServerAddressesViaGatewayAsync(
RxDocumentServiceRequest request,
String collectionRid,
List<String> partitionKeyRangeIds,
boolean forceRefresh) {
if (logger.isDebugEnabled()) {
logger.debug("getServerAddressesViaGatewayAsync collectionRid {}, partitionKeyRangeIds {}", collectionRid,
JavaStreamUtils.toString(partitionKeyRangeIds, ","));
}
request.setAddressRefresh(true, forceRefresh);
String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true);
HashMap<String, String> addressQuery = new HashMap<>();
addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
if (forceRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
}
if (request.forceCollectionRoutingMapRefresh) {
headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
}
// Restrict the response to addresses matching our transport protocol.
addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds));
headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
String token = null;
try {
token = this.tokenProvider.getUserAuthorizationToken(
collectionRid,
ResourceType.Document,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
request.properties);
} catch (UnauthorizedException e) {
// Swallowed deliberately: resource-token clients may lack a token for the rid,
// but can still authorize via the name-based collection link below.
if (logger.isDebugEnabled()) {
logger.debug("User doesn't have resource token for collection rid {}", collectionRid);
}
}
if (token == null && request.getIsNameBased()) {
String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress());
token = this.tokenProvider.getUserAuthorizationToken(
collectionAltLink,
ResourceType.Document,
RequestVerb.GET,
headers,
AuthorizationTokenType.PrimaryMasterKey,
request.properties);
}
token = HttpUtils.urlEncode(token);
headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
}
URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery));
String identifier = logAddressResolutionStart(
request, targetEndpoint, forceRefresh, request.forceCollectionRoutingMapRefresh);
HttpHeaders httpHeaders = new HttpHeaders(headers);
Instant addressCallStartTime = Instant.now();
HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders);
Mono<HttpResponse> httpResponseMono;
if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
httpResponseMono = this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
} else {
// AAD: the authorization header is populated asynchronously before sending.
httpResponseMono = tokenProvider
.populateAuthorizationHeader(httpHeaders)
.flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
}
Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, clientContext, httpResponseMono, httpRequest);
return dsrObs.map(
dsr -> {
MetadataDiagnosticsContext metadataDiagnosticsContext =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (metadataDiagnosticsContext != null) {
Instant addressCallEndTime = Instant.now();
MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
addressCallEndTime,
MetadataType.SERVER_ADDRESS_LOOKUP);
metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
}
if (logger.isDebugEnabled()) {
logger.debug("getServerAddressesViaGatewayAsync deserializes result");
}
logAddressResolutionEnd(request, identifier, null);
return dsr.getQueryResponse(null, Address.class);
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
logAddressResolutionEnd(request, identifier, unwrappedException.toString());
if (!(unwrappedException instanceof Exception)) {
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
logger.error("Network failure", exception);
// Map raw network failures: read timeout -> 408, other network error -> 503.
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(
request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
// Tag the failure with a gateway-endpoint sub-status for retry policies upstream.
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
}
return Mono.error(dce);
});
}
// Intentional no-op: this cache owns no resources that require explicit disposal.
public void dispose() {
}
/**
 * Resolves (and caches in masterPartitionAddressCache) the replica addresses of the master
 * partition. A cached suboptimal (under-replicated) result is force-refreshed once it is
 * older than suboptimalPartitionForceRefreshIntervalInSeconds.
 */
private Mono<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map<String, Object> properties) {
logger.debug("resolveMasterAsync forceRefresh: {}", forceRefresh);
// Snapshot the volatile once so all decisions below use a consistent value.
Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeInitial = this.masterPartitionAddressCache;
forceRefresh = forceRefresh ||
(masterAddressAndRangeInitial != null &&
notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) &&
Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds);
if (forceRefresh || this.masterPartitionAddressCache == null) {
Mono<List<Address>> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync(
request,
ResourceType.Database,
null,
databaseFeedEntryUrl,
forceRefresh,
false,
properties);
return masterReplicaAddressesObs.map(
masterAddresses -> {
Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeRes =
this.toPartitionAddressAndRange("", masterAddresses);
this.masterPartitionAddressCache = masterAddressAndRangeRes;
// Start (or clear) the suboptimal clock depending on replica completeness.
if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight())
&& this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
this.suboptimalMasterPartitionTimestamp = Instant.now();
} else {
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
}
return masterPartitionAddressCache;
})
.doOnError(
e -> {
// On failure, reset the suboptimal clock so the next call re-evaluates cleanly.
this.suboptimalMasterPartitionTimestamp = Instant.MAX;
});
} else {
// Serving from cache: still start the suboptimal clock if the cached set is incomplete.
if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight())
&& this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
this.suboptimalMasterPartitionTimestamp = Instant.now();
}
return Mono.just(masterAddressAndRangeInitial);
}
}
/**
 * Decides whether a forced partition address refresh should additionally force a collection
 * routing map refresh, based on the per-collection ForcedRefreshMetadata bookkeeping.
 * May set {@code request.forceCollectionRoutingMapRefresh} to true as a side effect.
 */
private void evaluateCollectionRoutingMapRefreshForServerPartition(
RxDocumentServiceRequest request,
PartitionKeyRangeIdentity pkRangeIdentity,
boolean forceRefreshPartitionAddresses) {
Utils.checkNotNullOrThrow(request, "request", "");
validatePkRangeIdentity(pkRangeIdentity);
String collectionRid = pkRangeIdentity.getCollectionRid();
String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
if (forceRefreshPartitionAddresses) {
ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
collectionRid,
(colRid) -> new ForcedRefreshMetadata());
if (request.forceCollectionRoutingMapRefresh) {
// Caller already wants a routing map refresh; record both refresh kinds.
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
true);
} else if (forcedRefreshMetadata.shouldIncludeCollectionRoutingMapRefresh(pkRangeIdentity)) {
// Escalate: repeated address-only refreshes indicate stale routing information.
request.forceCollectionRoutingMapRefresh = true;
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
true);
} else {
forcedRefreshMetadata.signalPartitionAddressOnlyRefresh(pkRangeIdentity);
}
} else if (request.forceCollectionRoutingMapRefresh) {
ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
collectionRid,
(colRid) -> new ForcedRefreshMetadata());
forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
pkRangeIdentity,
false);
}
logger.debug("evaluateCollectionRoutingMapRefreshForServerPartition collectionRid {}, partitionKeyRangeId {},"
+ " " +
"forceRefreshPartitionAddresses {}, forceCollectionRoutingMapRefresh {}",
collectionRid,
partitionKeyRangeId,
forceRefreshPartitionAddresses,
request.forceCollectionRoutingMapRefresh);
}
// Fails fast when the range identity or either of its components is null; the error
// messages intentionally reuse the historical "pkRangeId" argument naming.
private void validatePkRangeIdentity(PartitionKeyRangeIdentity pkRangeIdentity) {
    Utils.checkNotNullOrThrow(pkRangeIdentity, "pkRangeId", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getCollectionRid(), "pkRangeId.getCollectionRid()", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getPartitionKeyRangeId(), "pkRangeId.getPartitionKeyRangeId()", "");
}
/**
 * Resolves replica addresses for a single server partition via the gateway and merges them
 * with any cached addresses so unchanged replicas keep their cached Uri (and health state).
 *
 * @param request the service request driving the resolution.
 * @param pkRangeIdentity identity (collectionRid + pkRangeId) of the partition to resolve.
 * @param forceRefresh whether the gateway should bypass its own address cache.
 * @param cachedAddresses previously cached addresses for this partition; may be null.
 * @return the merged address array; errors with PartitionKeyRangeGoneException when the
 *         gateway no longer knows the requested partition key range.
 */
private Mono<AddressInformation[]> getAddressesForRangeId(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefresh,
    AddressInformation[] cachedAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);

    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
    logger.debug(
        "getAddressesForRangeId collectionRid {}, partitionKeyRangeId {}, forceRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefresh);

    Mono<List<Address>> addressResponse =
        this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh);

    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> addressInfos =
        addressResponse.map(
            addresses -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("addresses from getServerAddressesViaGatewayAsync in getAddressesForRangeId {}",
                        JavaStreamUtils.info(addresses));
                }
                return addresses
                    .stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream()
                    // BUGFIX: map each group from its OWN addresses. The previous code ignored
                    // the lambda parameter and passed the full, ungrouped list, so every pair
                    // carried the addresses of all ranges in the response.
                    .map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, groupedAddresses))
                    .collect(Collectors.toList());
            });

    // Defensive: keep only the pair for the range we actually asked for.
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> result =
        addressInfos
            .map(addressInfo -> addressInfo.stream()
                .filter(a -> StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId))
                .collect(Collectors.toList()));

    return result
        .flatMap(
            list -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("getAddressesForRangeId flatMap got result {}", JavaStreamUtils.info(list));
                }
                if (list.isEmpty()) {
                    // Gateway returned nothing for this range: surface as "range gone" so
                    // callers refresh their partition key range cache.
                    String errorMessage = String.format(
                        RMResources.PartitionKeyRangeNotFound,
                        partitionKeyRangeId,
                        collectionRid);
                    PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage);
                    BridgeInternal.setResourceAddress(e, collectionRid);
                    return Mono.error(e);
                } else {
                    // Reuse cached AddressInformation objects for replicas that did not change.
                    AddressInformation[] mergedAddresses = this.mergeAddresses(list.get(0).getRight(), cachedAddresses);
                    for (AddressInformation address : mergedAddresses) {
                        address.getPhysicalUri().setRefreshed();
                    }
                    if (this.replicaAddressValidationEnabled) {
                        this.validateReplicaAddresses(mergedAddresses);
                    }
                    return Mono.just(mergedAddresses);
                }
            })
        .doOnError(e -> logger.debug("getAddressesForRangeId", e));
}
/**
 * Resolves the replica addresses of the master partition by calling the gateway's
 * address endpoint with an entry URL (rather than partition key range ids).
 *
 * @param request the request driving this resolution; its address-refresh flag is set
 *                here and its diagnostics are updated on completion/failure.
 * @param resourceType resource type used when signing the authorization token.
 * @param resourceAddress resource address used when signing the authorization token.
 * @param entryUrl entry URL sent as the {@code $resolveFor} style query parameter.
 * @param forceRefresh when true, asks the gateway to bypass its own address cache.
 * @param useMasterCollectionResolver when true, adds the use-master-collection-resolver header.
 * @param properties opaque properties forwarded to the token provider.
 * @return a Mono emitting the master replica {@link Address} list, or a CosmosException on failure.
 */
public Mono<List<Address>> getMasterAddressesViaGatewayAsync(
    RxDocumentServiceRequest request,
    ResourceType resourceType,
    String resourceAddress,
    String entryUrl,
    boolean forceRefresh,
    boolean useMasterCollectionResolver,
    Map<String, Object> properties) {
    logger.debug("getMasterAddressesViaGatewayAsync " +
        "resourceType {}, " +
        "resourceAddress {}, " +
        "entryUrl {}, " +
        "forceRefresh {}, " +
        "useMasterCollectionResolver {}",
        resourceType,
        resourceAddress,
        entryUrl,
        forceRefresh,
        useMasterCollectionResolver
    );
    request.setAddressRefresh(true, forceRefresh);
    // Build the address-endpoint query: which resource to resolve for, filtered by protocol.
    HashMap<String, String> queryParameters = new HashMap<>();
    queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
    HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
    if (forceRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
    }
    if (useMasterCollectionResolver) {
        headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, "true");
    }
    if(request.forceCollectionRoutingMapRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
    }
    queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Key-based auth signs the token synchronously here; AAD auth populates the
    // authorization header asynchronously just before sending (see below).
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        String token = this.tokenProvider.getUserAuthorizationToken(
            resourceAddress,
            resourceType,
            RequestVerb.GET,
            headers,
            AuthorizationTokenType.PrimaryMasterKey,
            properties);
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token));
    }
    URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters));
    // Start the diagnostics span; 'identifier' correlates the matching logAddressResolutionEnd.
    String identifier = logAddressResolutionStart(
        request, targetEndpoint, true, true);
    HttpHeaders defaultHttpHeaders = new HttpHeaders(headers);
    HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), defaultHttpHeaders);
    Instant addressCallStartTime = Instant.now();
    Mono<HttpResponse> httpResponseMono;
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        httpResponseMono = this.httpClient.send(httpRequest,
            Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
    } else {
        httpResponseMono = tokenProvider
            .populateAuthorizationHeader(defaultHttpHeaders)
            .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
    }
    Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, this.clientContext, httpResponseMono, httpRequest);
    return dsrObs.map(
        dsr -> {
            // On success: record lookup latency in the request diagnostics, close the
            // resolution span, and deserialize the Address feed.
            MetadataDiagnosticsContext metadataDiagnosticsContext =
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
            if (metadataDiagnosticsContext != null) {
                Instant addressCallEndTime = Instant.now();
                MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                    addressCallEndTime,
                    MetadataType.MASTER_ADDRESS_LOOK_UP);
                metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
            }
            logAddressResolutionEnd(request, identifier, null);
            return dsr.getQueryResponse(null, Address.class);
        }).onErrorResume(throwable -> {
            // On failure: map any throwable to a CosmosException with the right
            // status / sub-status so upstream retry policies can act on it.
            Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
            logAddressResolutionEnd(request, identifier, unwrappedException.toString());
            if (!(unwrappedException instanceof Exception)) {
                // Errors (non-Exception throwables) are not recoverable — propagate as-is.
                logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                return Mono.error(unwrappedException);
            }
            Exception exception = (Exception) unwrappedException;
            CosmosException dce;
            if (!(exception instanceof CosmosException)) {
                // Wrap transport-level failures: read timeout -> 408, other network failure -> 503.
                logger.error("Network failure", exception);
                int statusCode = 0;
                if (WebExceptionUtility.isNetworkFailure(exception)) {
                    if (WebExceptionUtility.isReadTimeoutException(exception)) {
                        statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                    } else {
                        statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                    }
                }
                dce = BridgeInternal.createCosmosException(
                    request.requestContext.resourcePhysicalAddress, statusCode, exception);
                BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            } else {
                dce = (CosmosException) exception;
            }
            if (WebExceptionUtility.isNetworkFailure(dce)) {
                if (WebExceptionUtility.isReadTimeoutException(dce)) {
                    BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                } else {
                    BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                }
            }
            if (request.requestContext.cosmosDiagnostics != null) {
                BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
            }
            return Mono.error(dce);
        });
}
/***
 * NOTE(review): this javadoc describes {@code mergeAddresses}, not the
 * {@code validateReplicaAddresses} method that immediately follows it — it appears to
 * have been detached from its method. Move it above {@code mergeAddresses}.
 *
 * merge the new addresses returned from the gateway with the cached addresses.
 * If the address is being returned from the gateway again, then keep using the cached addressInformation object.
 * If it is a new address being returned, then use the new addressInformation object.
 *
 * @param newAddresses the latest addresses being returned from gateway.
 * @param cachedAddresses the cached addresses.
 *
 * @return the merged addresses.
 */
// Kicks off asynchronous connection validation for every replica address whose health
// status has degraded to UnhealthyPending. Fire-and-forget: never blocks the address
// resolution path.
private void validateReplicaAddresses(AddressInformation[] addresses) {
    checkNotNull(addresses, "Argument 'addresses' can not be null");

    // Only UnhealthyPending addresses need a proactive connection check.
    List<Uri> addressesNeedToValidation =
        Arrays
            .stream(addresses)
            .map(address -> address.getPhysicalUri())
            .filter(addressUri -> addressUri.getHealthStatus() == Uri.HealthStatus.UnhealthyPending)
            .collect(Collectors.toList());

    if (!addressesNeedToValidation.isEmpty()) {
        // The handler is late-injected via setOpenConnectionsHandler and may still be
        // null here (it is null-checked elsewhere, e.g. openConnectionsAndInitCaches);
        // skip validation instead of throwing NPE.
        if (this.openConnectionsHandler == null) {
            logger.info("OpenConnectionHandler is null, can not validate replica addresses");
            return;
        }

        this.openConnectionsHandler
            .openConnections(addressesNeedToValidation)
            .subscribeOn(CosmosSchedulers.OPEN_CONNECTIONS_BOUNDED_ELASTIC)
            .subscribe();
    }
}
/**
 * Converts the gateway-returned {@link Address} entries of one partition into a
 * (PartitionKeyRangeIdentity, AddressInformation[]) pair and, when TCP endpoint
 * rediscovery is enabled, indexes the addresses by server key so a failed endpoint
 * can later evict every partition it hosts (see updateAddresses).
 *
 * @param collectionRid collection resource id the addresses belong to.
 * @param addresses addresses of the partition; assumed non-empty — the pk-range id of
 *                  the first entry identifies the whole group (callers group by range id first).
 * @return the identity/addresses pair for the partition.
 */
private Pair<PartitionKeyRangeIdentity, AddressInformation[]> toPartitionAddressAndRange(String collectionRid, List<Address> addresses) {
    if (logger.isDebugEnabled()) {
        logger.debug("toPartitionAddressAndRange");
    }

    Address address = addresses.get(0);
    PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity(collectionRid, address.getParitionKeyRangeId());

    // Stream.toArray(IntFunction) avoids the intermediate List that the previous
    // collect(toList()).toArray(new AddressInformation[...]) form allocated.
    AddressInformation[] addressInfos =
        addresses
            .stream()
            .map(addr -> GatewayAddressCache.toAddressInformation(addr))
            .toArray(AddressInformation[]::new);

    if (this.tcpConnectionEndpointRediscoveryEnabled) {
        for (AddressInformation addressInfo : addressInfos) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Added address to serverPartitionAddressToPkRangeIdMap: ({\"partitionKeyRangeIdentity\":{},\"address\":{}})",
                    partitionKeyRangeIdentity,
                    addressInfo);
            }

            // Maintain the reverse index server-key -> pk-range identities.
            this.serverPartitionAddressToPkRangeIdMap.compute(addressInfo.getServerKey(), (serverKey, partitionKeyRangeIdentitySet) -> {
                if (partitionKeyRangeIdentitySet == null) {
                    partitionKeyRangeIdentitySet = ConcurrentHashMap.newKeySet();
                }
                partitionKeyRangeIdentitySet.add(partitionKeyRangeIdentity);
                return partitionKeyRangeIdentitySet;
            });
        }
    }

    return Pair.of(partitionKeyRangeIdentity, addressInfos);
}
// Maps a gateway Address payload onto the transport-level AddressInformation.
// The leading boolean is hard-coded to true for every gateway-resolved address —
// presumably "isPublic"; confirm against the AddressInformation constructor.
// NOTE(review): 'getPhyicalUri' spelling comes from the Address accessor itself.
private static AddressInformation toAddressInformation(Address address) {
    return new AddressInformation(true, address.isPrimary(), address.getPhyicalUri(), address.getProtocolScheme());
}
/**
 * Warms up this cache for a collection: resolves server addresses for the given
 * partition key ranges in batches (DefaultBatchSize ids per gateway call, with retry),
 * seeds serverPartitionAddressCache with the results, and — when an
 * openConnectionsHandler is wired up — proactively opens connections to every replica.
 *
 * @param collection the collection to warm up; must not be null.
 * @param partitionKeyRangeIdentities the partition key ranges to resolve; must not be null.
 * @return a Flux of per-connection open results (empty per batch when no handler is set).
 */
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(
    DocumentCollection collection,
    List<PartitionKeyRangeIdentity> partitionKeyRangeIdentities) {
    checkNotNull(collection, "Argument 'collection' should not be null");
    checkNotNull(partitionKeyRangeIdentities, "Argument 'partitionKeyRangeIdentities' should not be null");
    if (logger.isDebugEnabled()) {
        logger.debug(
            "openConnectionsAndInitCaches collection: {}, partitionKeyRangeIdentities: {}",
            collection.getResourceId(),
            JavaStreamUtils.toString(partitionKeyRangeIdentities, ","));
    }
    List<Flux<List<Address>>> tasks = new ArrayList<>();
    int batchSize = GatewayAddressCache.DefaultBatchSize;
    // One synthetic read request is reused for all batched address lookups.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this.clientContext,
        OperationType.Read,
        collection.getResourceId(),
        ResourceType.DocumentCollection,
        Collections.emptyMap());
    // Slice the pk-range ids into batches of at most batchSize per gateway call.
    for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) {
        int endIndex = i + batchSize;
        endIndex = Math.min(endIndex, partitionKeyRangeIdentities.size());
        tasks.add(
            this.getServerAddressesViaGatewayWithRetry(
                request,
                collection.getResourceId(),
                partitionKeyRangeIdentities
                    .subList(i, endIndex)
                    .stream()
                    .map(PartitionKeyRangeIdentity::getPartitionKeyRangeId)
                    .collect(Collectors.toList()),
                false).flux());
    }
    // Process batches sequentially (concat preserves order and limits gateway pressure).
    return Flux.concat(tasks)
        .flatMap(list -> {
            // Keep only addresses of this client's protocol, group them per pk-range id,
            // and convert each group into an identity/addresses pair.
            List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> addressInfos =
                list.stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream().map(addresses -> toPartitionAddressAndRange(collection.getResourceId(), addresses))
                    .collect(Collectors.toList());
            return Flux.fromIterable(addressInfos)
                .flatMap(addressInfo -> {
                    // Seed the async cache so subsequent tryGetAddresses calls hit warm entries.
                    this.serverPartitionAddressCache.set(addressInfo.getLeft(), addressInfo.getRight());
                    if (this.openConnectionsHandler != null) {
                        return this.openConnectionsHandler.openConnections(
                            Arrays
                                .stream(addressInfo.getRight())
                                .map(addressInformation -> addressInformation.getPhysicalUri())
                                .collect(Collectors.toList()));
                    }
                    logger.info("OpenConnectionHandler is null, can not open connections");
                    return Flux.empty();
                });
        });
}
/**
 * Same as {@link #getServerAddressesViaGatewayAsync} but wrapped in the
 * open-connections/init-caches retry policy, honoring the client's throttling
 * retry options. Used only by the cache warm-up path.
 */
private Mono<List<Address>> getServerAddressesViaGatewayWithRetry(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {

    // A fresh policy per invocation so retry state never leaks across calls.
    OpenConnectionAndInitCachesRetryPolicy retryPolicy =
        new OpenConnectionAndInitCachesRetryPolicy(this.connectionPolicy.getThrottlingRetryOptions());

    return BackoffRetryUtility.executeRetry(
        () -> this.getServerAddressesViaGatewayAsync(request, collectionRid, partitionKeyRangeIds, forceRefresh),
        retryPolicy);
}
// True when the resolved replica set is smaller than the system's maximum replica set
// size, i.e. the partition is running with a sub-optimal number of replicas.
private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) {
    int expectedReplicaCount = ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize;
    return addressInformations.length < expectedReplicaCount;
}
/**
 * Opens an address-resolution entry in the request diagnostics (if diagnostics are
 * enabled) and returns its correlation identifier, to be passed later to
 * {@link #logAddressResolutionEnd}. Returns null when diagnostics are disabled.
 */
private static String logAddressResolutionStart(
    RxDocumentServiceRequest request,
    URI targetEndpointUrl,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {
    // No diagnostics on the request -> nothing to record and no id to correlate.
    if (request.requestContext.cosmosDiagnostics == null) {
        return null;
    }
    return BridgeInternal.recordAddressResolutionStart(
        request.requestContext.cosmosDiagnostics,
        targetEndpointUrl,
        forceRefresh,
        forceCollectionRoutingMapRefresh);
}
// Closes the diagnostics entry opened by logAddressResolutionStart, identified by
// 'identifier'. A null errorMessage marks success. No-op when diagnostics are disabled.
private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier, String errorMessage) {
    if (request.requestContext.cosmosDiagnostics == null) {
        return;
    }
    BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosDiagnostics, identifier, errorMessage);
}
/**
 * Per-collection bookkeeping used to decide when a stream of forced partition-address
 * refreshes should be escalated into a collection routing map refresh as well.
 */
private static class ForcedRefreshMetadata {
    // Timestamp of the most recent address-only forced refresh, per partition.
    private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> lastPartitionAddressOnlyRefresh;
    // Timestamp of the most recent forced refresh that included the routing map.
    private Instant lastCollectionRoutingMapRefresh;

    public ForcedRefreshMetadata() {
        this.lastPartitionAddressOnlyRefresh = new ConcurrentHashMap<>();
        this.lastCollectionRoutingMapRefresh = Instant.now();
    }

    public void signalCollectionRoutingMapRefresh(
        PartitionKeyRangeIdentity pk,
        boolean forcePartitionAddressRefresh) {
        // Take a single timestamp so both records agree when the refresh covered both.
        final Instant now = Instant.now();
        if (forcePartitionAddressRefresh) {
            this.lastPartitionAddressOnlyRefresh.put(pk, now);
        }
        this.lastCollectionRoutingMapRefresh = now;
    }

    public void signalPartitionAddressOnlyRefresh(PartitionKeyRangeIdentity pk) {
        this.lastPartitionAddressOnlyRefresh.put(pk, Instant.now());
    }

    public boolean shouldIncludeCollectionRoutingMapRefresh(PartitionKeyRangeIdentity pk) {
        // Snapshot both timestamps once so the comparison below is self-consistent.
        final Instant addressRefreshSnapshot = this.lastPartitionAddressOnlyRefresh.get(pk);
        final Instant routingMapRefreshSnapshot = this.lastCollectionRoutingMapRefresh;

        // Escalate only if an address-only refresh happened strictly after the last
        // routing-map refresh; otherwise the routing map is already at least as fresh.
        if (addressRefreshSnapshot == null
            || !addressRefreshSnapshot.isAfter(routingMapRefreshSnapshot)) {
            return false;
        }

        // Throttle escalations: require a quiet period since the last routing-map refresh.
        final Duration sinceLastRoutingMapRefresh =
            Duration.between(routingMapRefreshSnapshot, Instant.now());
        return sinceLastRoutingMapRefresh.compareTo(minDurationBeforeEnforcingCollectionRoutingMapRefresh) >= 0;
    }
}
} | class GatewayAddressCache implements IAddressCache {
// Quiet period required before a repeated forced address refresh may escalate into a
// collection routing map refresh (see ForcedRefreshMetadata).
private final static Duration minDurationBeforeEnforcingCollectionRoutingMapRefresh = Duration.ofSeconds(30);
private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class);
// Template for the gateway address filter, e.g. "protocol eq rntbd".
private final static String protocolFilterFormat = "%s eq %s";
// Max partition key range ids resolved in a single gateway call during warm-up.
private final static int DefaultBatchSize = 50;
// Default interval after which a sub-optimal (under-replicated) partition forces a refresh.
private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600;
private final DiagnosticsClientContext clientContext;
// Entry URL used when resolving master-partition addresses.
private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true);
// Gateway address endpoint (serviceEndpoint + address path segment), built in the constructor.
private final URI addressEndpoint;
// Async cache of resolved addresses per server partition.
private final AsyncCacheNonBlocking<PartitionKeyRangeIdentity, AddressInformation[]> serverPartitionAddressCache;
// When a partition was first observed with fewer replicas than expected (Instant.MAX = never).
private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> suboptimalServerPartitionTimestamps;
private final long suboptimalPartitionForceRefreshIntervalInSeconds;
// Transport protocol scheme this client uses; only matching addresses are cached.
private final String protocolScheme;
// Pre-rendered query filter sent to the gateway (protocolFilterFormat applied to protocolScheme).
private final String protocolFilter;
private final IAuthorizationTokenProvider tokenProvider;
// Headers (user agent, api version, ...) added to every address request.
private final HashMap<String, String> defaultRequestHeaders;
private final HttpClient httpClient;
// Cached master-partition addresses; volatile because it is replaced/invalidated across threads.
private volatile Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterPartitionAddressCache;
// When the master partition was first observed under-replicated (Instant.MAX = never).
private volatile Instant suboptimalMasterPartitionTimestamp;
// Reverse index server key -> partitions it hosts, used to evict on endpoint failure.
private final ConcurrentHashMap<URI, Set<PartitionKeyRangeIdentity>> serverPartitionAddressToPkRangeIdMap;
private final boolean tcpConnectionEndpointRediscoveryEnabled;
// Per-collection forced-refresh bookkeeping (see ForcedRefreshMetadata).
private final ConcurrentHashMap<String, ForcedRefreshMetadata> lastForcedRefreshMap;
private final GlobalEndpointManager globalEndpointManager;
// Late-injected via setOpenConnectionsHandler; may be null — callers must null-check.
private IOpenConnectionsHandler openConnectionsHandler;
private final ConnectionPolicy connectionPolicy;
private final boolean replicaAddressValidationEnabled;
/**
 * Creates a gateway address cache bound to the given service endpoint and protocol.
 *
 * @param clientContext diagnostics client context for request creation/telemetry.
 * @param serviceEndpoint base service endpoint; the address path segment is appended.
 * @param protocol transport protocol whose addresses this cache resolves.
 * @param tokenProvider authorization token provider (key-based or AAD).
 * @param userAgent user agent container; a default one is created when null.
 * @param httpClient HTTP client used for gateway address calls (owned by the caller).
 * @param suboptimalPartitionForceRefreshIntervalInSeconds how long a partition may stay
 *        under-replicated before a refresh is forced.
 * @param tcpConnectionEndpointRediscoveryEnabled enables the server-key reverse index
 *        used by updateAddresses.
 * @param apiType optional API type header value; skipped when null.
 * @param globalEndpointManager used when recording gateway failures in diagnostics.
 * @param connectionPolicy source of throttling retry options for warm-up retries.
 * @param openConnectionsHandler may be null; can be injected later via setOpenConnectionsHandler.
 * @throws IllegalStateException when the service endpoint cannot be turned into an address URI.
 */
public GatewayAddressCache(
    DiagnosticsClientContext clientContext,
    URI serviceEndpoint,
    Protocol protocol,
    IAuthorizationTokenProvider tokenProvider,
    UserAgentContainer userAgent,
    HttpClient httpClient,
    long suboptimalPartitionForceRefreshIntervalInSeconds,
    boolean tcpConnectionEndpointRediscoveryEnabled,
    ApiType apiType,
    GlobalEndpointManager globalEndpointManager,
    ConnectionPolicy connectionPolicy,
    IOpenConnectionsHandler openConnectionsHandler) {
    this.clientContext = clientContext;
    try {
        // All address lookups go to the "/addresses" segment of the service endpoint.
        this.addressEndpoint = new URL(serviceEndpoint.toURL(), Paths.ADDRESS_PATH_SEGMENT).toURI();
    } catch (MalformedURLException | URISyntaxException e) {
        logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e);
        assert false;
        throw new IllegalStateException(e);
    }
    this.tokenProvider = tokenProvider;
    this.serverPartitionAddressCache = new AsyncCacheNonBlocking<>();
    this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>();
    // Instant.MAX is the "never observed sub-optimal" sentinel.
    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
    this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds;
    this.protocolScheme = protocol.scheme();
    // Pre-render the filter so only addresses of this protocol come back from the gateway.
    this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat,
        Constants.Properties.PROTOCOL,
        this.protocolScheme);
    this.httpClient = httpClient;
    if (userAgent == null) {
        userAgent = new UserAgentContainer();
    }
    defaultRequestHeaders = new HashMap<>();
    defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent());
    if(apiType != null) {
        defaultRequestHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
    }
    defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
    this.serverPartitionAddressToPkRangeIdMap = new ConcurrentHashMap<>();
    this.tcpConnectionEndpointRediscoveryEnabled = tcpConnectionEndpointRediscoveryEnabled;
    this.lastForcedRefreshMap = new ConcurrentHashMap<>();
    this.globalEndpointManager = globalEndpointManager;
    this.openConnectionsHandler = openConnectionsHandler;
    this.connectionPolicy = connectionPolicy;
    this.replicaAddressValidationEnabled = Configs.isReplicaAddressValidationEnabled();
}
/**
 * Convenience constructor that delegates to the full constructor using the default
 * sub-optimal partition force-refresh interval
 * ({@code DefaultSuboptimalPartitionForceRefreshIntervalInSeconds}).
 */
public GatewayAddressCache(
    DiagnosticsClientContext clientContext,
    URI serviceEndpoint,
    Protocol protocol,
    IAuthorizationTokenProvider tokenProvider,
    UserAgentContainer userAgent,
    HttpClient httpClient,
    boolean tcpConnectionEndpointRediscoveryEnabled,
    ApiType apiType,
    GlobalEndpointManager globalEndpointManager,
    ConnectionPolicy connectionPolicy,
    IOpenConnectionsHandler openConnectionsHandler) {
    this(clientContext,
        serviceEndpoint,
        protocol,
        tokenProvider,
        userAgent,
        httpClient,
        DefaultSuboptimalPartitionForceRefreshIntervalInSeconds,
        tcpConnectionEndpointRediscoveryEnabled,
        apiType,
        globalEndpointManager,
        connectionPolicy,
        openConnectionsHandler);
}
/**
 * Evicts every cached partition hosted on the given server endpoint (used when a TCP
 * endpoint is rediscovered). Master-partition entries clear the master cache; server
 * partitions are removed from the async cache. Requires
 * tcpConnectionEndpointRediscoveryEnabled; otherwise logs a warning and evicts nothing.
 *
 * @param serverKey the server endpoint whose cached addresses must be dropped.
 * @return the number of evicted cache entries.
 */
@Override
public int updateAddresses(final URI serverKey) {
    Objects.requireNonNull(serverKey, "expected non-null serverKey");

    AtomicInteger updatedCacheEntryCount = new AtomicInteger(0);

    if (!this.tcpConnectionEndpointRediscoveryEnabled) {
        logger.warn("tcpConnectionEndpointRediscovery is not enabled, should not reach here.");
        return updatedCacheEntryCount.get();
    }

    this.serverPartitionAddressToPkRangeIdMap.computeIfPresent(serverKey, (uri, pkRangeIdentities) -> {
        for (PartitionKeyRangeIdentity pkRangeIdentity : pkRangeIdentities) {
            if (pkRangeIdentity.getPartitionKeyRangeId().equals(PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
                this.masterPartitionAddressCache = null;
            } else {
                this.serverPartitionAddressCache.remove(pkRangeIdentity);
            }
            updatedCacheEntryCount.incrementAndGet();
        }
        // Returning null drops the server-key entry itself once its partitions are evicted.
        return null;
    });

    return updatedCacheEntryCount.get();
}
/**
 * Resolves replica addresses for a partition, serving from cache when possible.
 * Master-partition requests are delegated to resolveMasterAsync; server partitions go
 * through the async address cache, optionally forcing a refresh when the caller asks
 * for one or when the partition has stayed under-replicated longer than
 * suboptimalPartitionForceRefreshIntervalInSeconds.
 *
 * @param request the request whose refresh flags/diagnostics drive the lookup.
 * @param partitionKeyRangeIdentity the partition to resolve.
 * @param forceRefreshPartitionAddresses whether the caller requires fresh addresses.
 * @return a Mono of a ValueHolder with the addresses; the holder's value is null when
 *         the partition is gone (404/410/partition-key-range-gone).
 */
@Override
public Mono<Utils.ValueHolder<AddressInformation[]>> tryGetAddresses(RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity partitionKeyRangeIdentity,
    boolean forceRefreshPartitionAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", "");
    logger.debug("PartitionKeyRangeIdentity {}, forceRefreshPartitionAddresses {}",
        partitionKeyRangeIdentity,
        forceRefreshPartitionAddresses);
    // Master partition short-circuits to its dedicated resolution/cache path.
    if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(),
        PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) {
        return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties)
            .map(partitionKeyRangeIdentityPair -> new Utils.ValueHolder<>(partitionKeyRangeIdentityPair.getRight()));
    }
    // May escalate this lookup into a collection routing map refresh (sets the request flag).
    evaluateCollectionRoutingMapRefreshForServerPartition(
        request, partitionKeyRangeIdentity, forceRefreshPartitionAddresses);
    // If this partition has been under-replicated for longer than the configured
    // interval, promote to a forced refresh — exactly once per observation (CAS-style
    // swap of the timestamp to Instant.MAX via computeIfPresent below).
    Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity);
    if (suboptimalServerPartitionTimestamp != null) {
        logger.debug("suboptimalServerPartitionTimestamp is {}", suboptimalServerPartitionTimestamp);
        boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds()
            > this.suboptimalPartitionForceRefreshIntervalInSeconds;
        if (forceRefreshDueToSuboptimalPartitionReplicaSet) {
            Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity,
                (key, oldVal) -> {
                    logger.debug("key = {}, oldValue = {}", key, oldVal);
                    // Only the thread that observed the same timestamp wins the swap.
                    if (suboptimalServerPartitionTimestamp.equals(oldVal)) {
                        return Instant.MAX;
                    } else {
                        return oldVal;
                    }
                });
            logger.debug("newValue is {}", newValue);
            if (!suboptimalServerPartitionTimestamp.equals(newValue)) {
                logger.debug("setting forceRefreshPartitionAddresses to true");
                forceRefreshPartitionAddresses = true;
            }
        }
    }
    // Effectively-final copy for use inside the lambdas below.
    final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses;
    if (forceRefreshPartitionAddressesModified) {
        this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
    }
    Mono<Utils.ValueHolder<AddressInformation[]>> addressesObs =
        this.serverPartitionAddressCache
            .getAsync(
                partitionKeyRangeIdentity,
                // Value factory: fetch from the gateway, merging with cached addresses.
                cachedAddresses -> this.getAddressesForRangeId(
                    request,
                    partitionKeyRangeIdentity,
                    forceRefreshPartitionAddressesModified,
                    cachedAddresses),
                // Refresh predicate: refresh when forced, or when any cached replica's
                // health status says it needs re-validation. Endpoints that already
                // failed for this request are marked unhealthy first.
                cachedAddresses -> {
                    for (Uri failedEndpoints : request.requestContext.getFailedEndpoints()) {
                        failedEndpoints.setUnhealthy();
                    }
                    return forceRefreshPartitionAddressesModified
                        || Arrays.stream(cachedAddresses).anyMatch(addressInformation -> addressInformation.getPhysicalUri().shouldRefreshHealthStatus());
                })
            .map(Utils.ValueHolder::new);
    return addressesObs
        .map(addressesValueHolder -> {
            // Record first observation of an under-replicated partition.
            if (notAllReplicasAvailable(addressesValueHolder.v)) {
                if (logger.isDebugEnabled()) {
                    logger.debug("not all replicas available {}", JavaStreamUtils.info(addressesValueHolder.v));
                }
                this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now());
            }
            return addressesValueHolder;
        })
        .onErrorResume(ex -> {
            Throwable unwrappedException = reactor.core.Exceptions.unwrap(ex);
            CosmosException dce = Utils.as(unwrappedException, CosmosException.class);
            if (dce == null) {
                logger.error("unexpected failure", ex);
                if (forceRefreshPartitionAddressesModified) {
                    this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
                }
                return Mono.error(unwrappedException);
            } else {
                logger.debug("tryGetAddresses dce", dce);
                // Partition gone / not found: signal with a null-valued holder so the
                // caller can re-resolve, rather than propagating the error.
                if (Exceptions.isNotFound(dce) ||
                    Exceptions.isGone(dce) ||
                    Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) {
                    this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity);
                    logger.debug("tryGetAddresses: inner onErrorResumeNext return null", dce);
                    return Mono.just(new Utils.ValueHolder<>(null));
                }
                return Mono.error(unwrappedException);
            }
        });
}
// Late injection point for the open-connections handler: the handler may not exist
// when this cache is constructed, so it can be wired up afterwards. May be set to null;
// all usages in this class null-check before calling it.
@Override
public void setOpenConnectionsHandler(IOpenConnectionsHandler openConnectionsHandler) {
    this.openConnectionsHandler = openConnectionsHandler;
}
/**
 * Fetches replica addresses for one or more partition key ranges of a collection from
 * the gateway address endpoint. Handles both key-based and AAD authorization, records
 * lookup latency in the request diagnostics, and maps failures to CosmosExceptions
 * with transport-aware status / sub-status codes.
 *
 * @param request the request driving this lookup (refresh flags, headers, diagnostics).
 * @param collectionRid collection resource id whose partitions are resolved.
 * @param partitionKeyRangeIds the range ids to resolve (sent comma-joined).
 * @param forceRefresh when true, asks the gateway to bypass its own address cache.
 * @return a Mono emitting the resolved {@link Address} list, or a CosmosException on failure.
 */
public Mono<List<Address>> getServerAddressesViaGatewayAsync(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {
    if (logger.isDebugEnabled()) {
        logger.debug("getServerAddressesViaGatewayAsync collectionRid {}, partitionKeyRangeIds {}", collectionRid,
            JavaStreamUtils.toString(partitionKeyRangeIds, ","));
    }
    request.setAddressRefresh(true, forceRefresh);
    String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true);
    HashMap<String, String> addressQuery = new HashMap<>();
    addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
    HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
    if (forceRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
    }
    if (request.forceCollectionRoutingMapRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
    }
    addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
    // All requested range ids are resolved in a single gateway round trip.
    addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds));
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Key-based auth: try signing with the collection rid first; if the caller only has
    // a resource token for the name-based link, fall back to the collection alt-link.
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        String token = null;
        try {
            token = this.tokenProvider.getUserAuthorizationToken(
                collectionRid,
                ResourceType.Document,
                RequestVerb.GET,
                headers,
                AuthorizationTokenType.PrimaryMasterKey,
                request.properties);
        } catch (UnauthorizedException e) {
            // Deliberately swallowed: the name-based fallback below may still succeed.
            if (logger.isDebugEnabled()) {
                logger.debug("User doesn't have resource token for collection rid {}", collectionRid);
            }
        }
        if (token == null && request.getIsNameBased()) {
            String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress());
            token = this.tokenProvider.getUserAuthorizationToken(
                collectionAltLink,
                ResourceType.Document,
                RequestVerb.GET,
                headers,
                AuthorizationTokenType.PrimaryMasterKey,
                request.properties);
        }
        token = HttpUtils.urlEncode(token);
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
    }
    URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery));
    // Open the diagnostics span; 'identifier' correlates the matching logAddressResolutionEnd.
    String identifier = logAddressResolutionStart(
        request, targetEndpoint, forceRefresh, request.forceCollectionRoutingMapRefresh);
    HttpHeaders httpHeaders = new HttpHeaders(headers);
    Instant addressCallStartTime = Instant.now();
    HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders);
    Mono<HttpResponse> httpResponseMono;
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        httpResponseMono = this.httpClient.send(httpRequest,
            Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
    } else {
        // AAD: the authorization header is populated asynchronously before sending.
        httpResponseMono = tokenProvider
            .populateAuthorizationHeader(httpHeaders)
            .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
    }
    Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, clientContext, httpResponseMono, httpRequest);
    return dsrObs.map(
        dsr -> {
            // Success: record lookup latency, close the span, deserialize the Address feed.
            MetadataDiagnosticsContext metadataDiagnosticsContext =
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
            if (metadataDiagnosticsContext != null) {
                Instant addressCallEndTime = Instant.now();
                MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                    addressCallEndTime,
                    MetadataType.SERVER_ADDRESS_LOOKUP);
                metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
            }
            if (logger.isDebugEnabled()) {
                logger.debug("getServerAddressesViaGatewayAsync deserializes result");
            }
            logAddressResolutionEnd(request, identifier, null);
            return dsr.getQueryResponse(null, Address.class);
        }).onErrorResume(throwable -> {
            // Failure: map any throwable onto a CosmosException so retry policies can act.
            Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
            logAddressResolutionEnd(request, identifier, unwrappedException.toString());
            if (!(unwrappedException instanceof Exception)) {
                // Errors (non-Exception throwables) are not recoverable — propagate as-is.
                logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                return Mono.error(unwrappedException);
            }
            Exception exception = (Exception) unwrappedException;
            CosmosException dce;
            if (!(exception instanceof CosmosException)) {
                // Wrap transport failures: read timeout -> 408, other network failure -> 503.
                logger.error("Network failure", exception);
                int statusCode = 0;
                if (WebExceptionUtility.isNetworkFailure(exception)) {
                    if (WebExceptionUtility.isReadTimeoutException(exception)) {
                        statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                    } else {
                        statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                    }
                }
                dce = BridgeInternal.createCosmosException(
                    request.requestContext.resourcePhysicalAddress, statusCode, exception);
                BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            } else {
                dce = (CosmosException) exception;
            }
            if (WebExceptionUtility.isNetworkFailure(dce)) {
                if (WebExceptionUtility.isReadTimeoutException(dce)) {
                    BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                } else {
                    BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                }
            }
            if (request.requestContext.cosmosDiagnostics != null) {
                BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
            }
            return Mono.error(dce);
        });
}
// Intentionally a no-op: this cache holds no resources requiring explicit disposal here.
// NOTE(review): the HttpClient is supplied by the caller — presumably the caller owns
// and closes it; confirm against the constructing client.
public void dispose() {
}
/**
 * Resolves master-partition addresses, using the single-entry masterPartitionAddressCache
 * when possible. A refresh is forced when the caller asks for one, or when the cached
 * replica set has stayed under-replicated longer than
 * suboptimalPartitionForceRefreshIntervalInSeconds.
 *
 * @param request the request driving the gateway call when a refresh is needed.
 * @param forceRefresh whether the caller requires fresh addresses.
 * @param properties opaque properties forwarded to the token provider.
 * @return a Mono emitting the master (identity, addresses) pair.
 */
private Mono<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map<String, Object> properties) {
    logger.debug("resolveMasterAsync forceRefresh: {}", forceRefresh);
    // Snapshot the volatile cache once so the decision below is self-consistent.
    Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeInitial = this.masterPartitionAddressCache;
    // Also force a refresh when the cached replica set is sub-optimal and has been so
    // for longer than the configured interval.
    forceRefresh = forceRefresh ||
        (masterAddressAndRangeInitial != null &&
            notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) &&
            Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds);
    if (forceRefresh || this.masterPartitionAddressCache == null) {
        Mono<List<Address>> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync(
            request,
            ResourceType.Database,
            null,
            databaseFeedEntryUrl,
            forceRefresh,
            false,
            properties);
        return masterReplicaAddressesObs.map(
            masterAddresses -> {
                // Master partition uses an empty collection rid for its identity.
                Pair<PartitionKeyRangeIdentity, AddressInformation[]> masterAddressAndRangeRes =
                    this.toPartitionAddressAndRange("", masterAddresses);
                this.masterPartitionAddressCache = masterAddressAndRangeRes;
                // Start (or reset) the sub-optimal observation clock; Instant.MAX means
                // "currently fully replicated".
                if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight())
                    && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
                    this.suboptimalMasterPartitionTimestamp = Instant.now();
                } else {
                    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
                }
                return masterPartitionAddressCache;
            })
            .doOnError(
                e -> {
                    // On failure, clear the sub-optimal clock so a stale timestamp cannot
                    // trigger spurious refreshes later.
                    this.suboptimalMasterPartitionTimestamp = Instant.MAX;
                });
    } else {
        // Cache hit: record first observation of an under-replicated master replica set.
        if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight())
            && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) {
            this.suboptimalMasterPartitionTimestamp = Instant.now();
        }
        return Mono.just(masterAddressAndRangeInitial);
    }
}
/**
 * Decides, for a server-partition address lookup, whether the collection routing map
 * should be force-refreshed as well, and records the forced-refresh bookkeeping used
 * to throttle those escalations (one ForcedRefreshMetadata per collection rid).
 *
 * Escalation rule: when addresses for the same partition keep getting force-refreshed
 * without an accompanying routing-map refresh, the request is eventually promoted to
 * also refresh the routing map (see ForcedRefreshMetadata#shouldIncludeCollectionRoutingMapRefresh).
 *
 * @param request the request whose forceCollectionRoutingMapRefresh flag may be set here.
 * @param pkRangeIdentity identity of the partition being looked up.
 * @param forceRefreshPartitionAddresses whether the caller asked for a forced address refresh.
 */
private void evaluateCollectionRoutingMapRefreshForServerPartition(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefreshPartitionAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);

    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();

    if (forceRefreshPartitionAddresses || request.forceCollectionRoutingMapRefresh) {
        // Per-collection bookkeeping is created lazily, only when some forced refresh
        // actually happens (consolidates the previously duplicated computeIfAbsent calls;
        // no lookup occurs when neither flag is set, matching the original behavior).
        ForcedRefreshMetadata forcedRefreshMetadata = this.lastForcedRefreshMap.computeIfAbsent(
            collectionRid,
            (colRid) -> new ForcedRefreshMetadata());

        if (forceRefreshPartitionAddresses) {
            if (request.forceCollectionRoutingMapRefresh) {
                forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
                    pkRangeIdentity,
                    true);
            } else if (forcedRefreshMetadata.shouldIncludeCollectionRoutingMapRefresh(pkRangeIdentity)) {
                // Escalate: repeated address-only refreshes have not converged, so
                // refresh the collection routing map too.
                request.forceCollectionRoutingMapRefresh = true;
                forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
                    pkRangeIdentity,
                    true);
            } else {
                forcedRefreshMetadata.signalPartitionAddressOnlyRefresh(pkRangeIdentity);
            }
        } else {
            // Routing-map-only refresh requested by the caller.
            forcedRefreshMetadata.signalCollectionRoutingMapRefresh(
                pkRangeIdentity,
                false);
        }
    }

    logger.debug("evaluateCollectionRoutingMapRefreshForServerPartition collectionRid {}, partitionKeyRangeId {},"
        + " " +
        "forceRefreshPartitionAddresses {}, forceCollectionRoutingMapRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefreshPartitionAddresses,
        request.forceCollectionRoutingMapRefresh);
}
// Fails fast when the partition key range identity, or either of its components, is
// missing — downstream cache lookups key on both the collection rid and the range id.
private void validatePkRangeIdentity(PartitionKeyRangeIdentity pkRangeIdentity) {
    Utils.checkNotNullOrThrow(pkRangeIdentity, "pkRangeId", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getCollectionRid(), "pkRangeId.getCollectionRid()", "");
    Utils.checkNotNullOrThrow(pkRangeIdentity.getPartitionKeyRangeId(), "pkRangeId.getPartitionKeyRangeId()", "");
}
/**
 * Resolves replica addresses for a single partition key range via the gateway, merges them with
 * the previously cached addresses, and optionally kicks off replica address validation.
 *
 * Fix: the grouped-by-range address list ({@code groupedAddresses}) is now passed to
 * {@code toPartitionAddressAndRange} instead of the full, unfiltered gateway response. Previously
 * the protocol-scheme filter and the groupingBy step had no effect on the addresses actually used
 * (the lambda parameter was unused), which also diverged from the identical pipeline in
 * {@code openConnectionsAndInitCaches}.
 *
 * @param request the service request driving the gateway call and diagnostics.
 * @param pkRangeIdentity collection rid + partition key range id to resolve.
 * @param forceRefresh true to bypass gateway-side caching.
 * @param cachedAddresses previously cached addresses to merge with the fresh response.
 * @return the merged addresses, or an error Mono with {@link PartitionKeyRangeGoneException}
 *         when the gateway returned nothing for the requested range.
 */
private Mono<AddressInformation[]> getAddressesForRangeId(
    RxDocumentServiceRequest request,
    PartitionKeyRangeIdentity pkRangeIdentity,
    boolean forceRefresh,
    AddressInformation[] cachedAddresses) {
    Utils.checkNotNullOrThrow(request, "request", "");
    validatePkRangeIdentity(pkRangeIdentity);
    String collectionRid = pkRangeIdentity.getCollectionRid();
    String partitionKeyRangeId = pkRangeIdentity.getPartitionKeyRangeId();
    logger.debug(
        "getAddressesForRangeId collectionRid {}, partitionKeyRangeId {}, forceRefresh {}",
        collectionRid,
        partitionKeyRangeId,
        forceRefresh);
    Mono<List<Address>> addressResponse = this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh);
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> addressInfos =
        addressResponse.map(
            addresses -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("addresses from getServerAddressesViaGatewayAsync in getAddressesForRangeId {}",
                        JavaStreamUtils.info(addresses));
                }
                return addresses
                    .stream()
                    // Only keep addresses matching the transport protocol of this cache.
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream()
                    // FIX: use the per-range group, not the unfiltered full response.
                    .map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, groupedAddresses))
                    .collect(Collectors.toList());
            });
    Mono<List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>>> result =
        addressInfos
            .map(addressInfo -> addressInfo.stream()
                .filter(a -> StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId))
                .collect(Collectors.toList()));
    return result
        .flatMap(
            list -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("getAddressesForRangeId flatMap got result {}", JavaStreamUtils.info(list));
                }
                if (list.isEmpty()) {
                    // Nothing returned for the requested range: surface it as a "range gone" error
                    // so callers can refresh their routing map.
                    String errorMessage = String.format(
                        RMResources.PartitionKeyRangeNotFound,
                        partitionKeyRangeId,
                        collectionRid);
                    PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage);
                    BridgeInternal.setResourceAddress(e, collectionRid);
                    return Mono.error(e);
                } else {
                    // Keep cached AddressInformation instances when the gateway returned the same
                    // address again; see mergeAddresses.
                    AddressInformation[] mergedAddresses = this.mergeAddresses(list.get(0).getRight(), cachedAddresses);
                    for (AddressInformation address : mergedAddresses) {
                        address.getPhysicalUri().setRefreshed();
                    }
                    if (this.replicaAddressValidationEnabled) {
                        this.validateReplicaAddresses(mergedAddresses);
                    }
                    return Mono.just(mergedAddresses);
                }
            })
        .doOnError(e -> logger.debug("getAddressesForRangeId", e));
}
/**
 * Resolves master (gateway-served) replica addresses for a resource by calling the address
 * endpoint, recording metadata diagnostics, and translating transport failures into
 * {@link CosmosException}s with appropriate status/sub-status codes.
 *
 * @param request the service request; flagged as an address refresh and used for diagnostics.
 * @param resourceType type of the resource whose addresses are requested.
 * @param resourceAddress resource address used for authorization.
 * @param entryUrl entry URL forwarded to the address endpoint as a query parameter.
 * @param forceRefresh true to send the force-refresh header.
 * @param useMasterCollectionResolver true to send the master-collection-resolver header.
 * @param properties extra properties forwarded to the token provider.
 * @return the list of addresses parsed from the gateway response.
 */
public Mono<List<Address>> getMasterAddressesViaGatewayAsync(
    RxDocumentServiceRequest request,
    ResourceType resourceType,
    String resourceAddress,
    String entryUrl,
    boolean forceRefresh,
    boolean useMasterCollectionResolver,
    Map<String, Object> properties) {
    logger.debug("getMasterAddressesViaGatewayAsync " +
        "resourceType {}, " +
        "resourceAddress {}, " +
        "entryUrl {}, " +
        "forceRefresh {}, " +
        "useMasterCollectionResolver {}",
        resourceType,
        resourceAddress,
        entryUrl,
        forceRefresh,
        useMasterCollectionResolver
    );
    request.setAddressRefresh(true, forceRefresh);
    HashMap<String, String> queryParameters = new HashMap<>();
    queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl));
    HashMap<String, String> headers = new HashMap<>(defaultRequestHeaders);
    // Optional behavior headers, driven by the caller's flags and the request state.
    if (forceRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, "true");
    }
    if (useMasterCollectionResolver) {
        headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, "true");
    }
    if(request.forceCollectionRoutingMapRefresh) {
        headers.put(HttpConstants.HttpHeaders.FORCE_COLLECTION_ROUTING_MAP_REFRESH, "true");
    }
    queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter));
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Key-based auth computes the token up front; AAD auth populates the header just before send.
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        String token = this.tokenProvider.getUserAuthorizationToken(
            resourceAddress,
            resourceType,
            RequestVerb.GET,
            headers,
            AuthorizationTokenType.PrimaryMasterKey,
            properties);
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token));
    }
    URI targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters));
    String identifier = logAddressResolutionStart(
        request, targetEndpoint, true, true);
    HttpHeaders defaultHttpHeaders = new HttpHeaders(headers);
    HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), defaultHttpHeaders);
    Instant addressCallStartTime = Instant.now();
    Mono<HttpResponse> httpResponseMono;
    if (tokenProvider.getAuthorizationTokenType() != AuthorizationTokenType.AadToken) {
        httpResponseMono = this.httpClient.send(httpRequest,
            Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()));
    } else {
        // AAD: fetch/refresh the bearer token asynchronously, then send.
        httpResponseMono = tokenProvider
            .populateAuthorizationHeader(defaultHttpHeaders)
            .flatMap(valueHttpHeaders -> this.httpClient.send(httpRequest,
                Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds())));
    }
    Mono<RxDocumentServiceResponse> dsrObs = HttpClientUtils.parseResponseAsync(request, this.clientContext, httpResponseMono, httpRequest);
    return dsrObs.map(
        dsr -> {
            // Record how long the master address lookup took, when diagnostics are attached.
            MetadataDiagnosticsContext metadataDiagnosticsContext =
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
            if (metadataDiagnosticsContext != null) {
                Instant addressCallEndTime = Instant.now();
                MetadataDiagnostics metaDataDiagnostic = new MetadataDiagnostics(addressCallStartTime,
                    addressCallEndTime,
                    MetadataType.MASTER_ADDRESS_LOOK_UP);
                metadataDiagnosticsContext.addMetaDataDiagnostic(metaDataDiagnostic);
            }
            logAddressResolutionEnd(request, identifier, null);
            return dsr.getQueryResponse(null, Address.class);
        }).onErrorResume(throwable -> {
        // Unwrap reactor-composite exceptions before classifying the failure.
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        logAddressResolutionEnd(request, identifier, unwrappedException.toString());
        if (!(unwrappedException instanceof Exception)) {
            // Errors (e.g. OOM) are rethrown untouched.
            logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
            return Mono.error(unwrappedException);
        }
        Exception exception = (Exception) unwrappedException;
        CosmosException dce;
        if (!(exception instanceof CosmosException)) {
            // Wrap raw network failures: read timeouts map to 408, other network errors to 503.
            logger.error("Network failure", exception);
            int statusCode = 0;
            if (WebExceptionUtility.isNetworkFailure(exception)) {
                if (WebExceptionUtility.isReadTimeoutException(exception)) {
                    statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                } else {
                    statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                }
            }
            dce = BridgeInternal.createCosmosException(
                request.requestContext.resourcePhysicalAddress, statusCode, exception);
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        } else {
            dce = (CosmosException) exception;
        }
        if (WebExceptionUtility.isNetworkFailure(dce)) {
            // Tag the gateway-specific sub-status so callers can distinguish endpoint issues.
            if (WebExceptionUtility.isReadTimeoutException(dce)) {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
            } else {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
            }
        }
        if (request.requestContext.cosmosDiagnostics != null) {
            BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, this.globalEndpointManager);
        }
        return Mono.error(dce);
    });
}
/***
 * Merges the new addresses returned from the gateway with the cached addresses.
 * If an address is returned from the gateway again, the cached addressInformation object is kept;
 * if a new address is returned, the new addressInformation object is used.
 * NOTE(review): this Javadoc describes {@code mergeAddresses}, not the method that immediately
 * follows it here — consider moving it next to that method.
 *
 * @param newAddresses the latest addresses returned from the gateway.
 * @param cachedAddresses the cached addresses.
 *
 * @return the merged addresses.
 */
/**
 * Kicks off background connection validation for any replica whose transport URI is currently
 * flagged {@code UnhealthyPending}; all other replicas are left untouched.
 *
 * @param addresses the merged replica addresses to inspect; must not be null.
 */
private void validateReplicaAddresses(AddressInformation[] addresses) {
    checkNotNull(addresses, "Argument 'addresses' can not be null");
    // Only URIs in the UnhealthyPending state are worth probing again.
    List<Uri> unhealthyPendingUris = Arrays
        .stream(addresses)
        .map(AddressInformation::getPhysicalUri)
        .filter(uri -> uri.getHealthStatus() == Uri.HealthStatus.UnhealthyPending)
        .collect(Collectors.toList());
    if (!unhealthyPendingUris.isEmpty()) {
        // Fire-and-forget on the dedicated bounded-elastic scheduler.
        this.openConnectionsHandler
            .openConnections(unhealthyPendingUris)
            .subscribeOn(CosmosSchedulers.OPEN_CONNECTIONS_BOUNDED_ELASTIC)
            .subscribe();
    }
}
/**
 * Converts one group of gateway {@link Address} entries into a
 * (PartitionKeyRangeIdentity, AddressInformation[]) pair and, when TCP endpoint rediscovery is
 * enabled, indexes each address's server key back to the range in
 * {@code serverPartitionAddressToPkRangeIdMap}.
 * NOTE(review): assumes every entry in {@code addresses} belongs to the partition key range of
 * the first element — confirm at call sites.
 *
 * @param collectionRid rid of the owning collection.
 * @param addresses gateway addresses, expected to share one partition key range.
 * @return the identity/addresses pair.
 */
private Pair<PartitionKeyRangeIdentity, AddressInformation[]> toPartitionAddressAndRange(String collectionRid, List<Address> addresses) {
    if (logger.isDebugEnabled()) {
        logger.debug("toPartitionAddressAndRange");
    }
    // The range id is taken from the first entry (see NOTE above).
    Address address = addresses.get(0);
    PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity(collectionRid, address.getParitionKeyRangeId());
    AddressInformation[] addressInfos =
        addresses
            .stream()
            .map(addr -> GatewayAddressCache.toAddressInformation(addr))
            .collect(Collectors.toList())
            .toArray(new AddressInformation[addresses.size()]);
    if (this.tcpConnectionEndpointRediscoveryEnabled) {
        for (AddressInformation addressInfo : addressInfos) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Added address to serverPartitionAddressToPkRangeIdMap: ({\"partitionKeyRangeIdentity\":{},\"address\":{}})",
                    partitionKeyRangeIdentity,
                    addressInfo);
            }
            // Atomically add this range to the set of ranges hosted on the address's server key.
            this.serverPartitionAddressToPkRangeIdMap.compute(addressInfo.getServerKey(), (serverKey, partitionKeyRangeIdentitySet) -> {
                if (partitionKeyRangeIdentitySet == null) {
                    partitionKeyRangeIdentitySet = ConcurrentHashMap.newKeySet();
                }
                partitionKeyRangeIdentitySet.add(partitionKeyRangeIdentity);
                return partitionKeyRangeIdentitySet;
            });
        }
    }
    return Pair.of(partitionKeyRangeIdentity, addressInfos);
}
// Adapts a gateway Address payload into the transport-level AddressInformation.
// NOTE(review): getPhyicalUri is the accessor name as declared on the Address model
// (misspelling included) — do not "fix" the spelling here.
private static AddressInformation toAddressInformation(Address address) {
    return new AddressInformation(true, address.isPrimary(), address.getPhyicalUri(), address.getProtocolScheme());
}
/**
 * Warms up the address cache and opens connections for the given partition key ranges of a
 * collection: addresses are fetched from the gateway in batches (with retry), cached in
 * {@code serverPartitionAddressCache}, and handed to the open-connections handler when one is
 * configured.
 *
 * @param collection the collection whose partitions should be warmed up; must not be null.
 * @param partitionKeyRangeIdentities ranges to resolve and connect to; must not be null.
 * @return a Flux of open-connection responses; empty per batch when no handler is configured.
 */
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(
    DocumentCollection collection,
    List<PartitionKeyRangeIdentity> partitionKeyRangeIdentities) {
    checkNotNull(collection, "Argument 'collection' should not be null");
    checkNotNull(partitionKeyRangeIdentities, "Argument 'partitionKeyRangeIdentities' should not be null");
    if (logger.isDebugEnabled()) {
        logger.debug(
            "openConnectionsAndInitCaches collection: {}, partitionKeyRangeIdentities: {}",
            collection.getResourceId(),
            JavaStreamUtils.toString(partitionKeyRangeIdentities, ","));
    }
    List<Flux<List<Address>>> tasks = new ArrayList<>();
    int batchSize = GatewayAddressCache.DefaultBatchSize;
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this.clientContext,
        OperationType.Read,
        collection.getResourceId(),
        ResourceType.DocumentCollection,
        Collections.emptyMap());
    // Split the range ids into gateway calls of at most batchSize ids each.
    for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) {
        int endIndex = i + batchSize;
        endIndex = Math.min(endIndex, partitionKeyRangeIdentities.size());
        tasks.add(
            this.getServerAddressesViaGatewayWithRetry(
                request,
                collection.getResourceId(),
                partitionKeyRangeIdentities
                    .subList(i, endIndex)
                    .stream()
                    .map(PartitionKeyRangeIdentity::getPartitionKeyRangeId)
                    .collect(Collectors.toList()),
                false).flux());
    }
    // Batches run sequentially (concat); within a batch, addresses are grouped per range.
    return Flux.concat(tasks)
        .flatMap(list -> {
            List<Pair<PartitionKeyRangeIdentity, AddressInformation[]>> addressInfos =
                list.stream()
                    .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme()))
                    .collect(Collectors.groupingBy(Address::getParitionKeyRangeId))
                    .values()
                    .stream().map(addresses -> toPartitionAddressAndRange(collection.getResourceId(), addresses))
                    .collect(Collectors.toList());
            return Flux.fromIterable(addressInfos)
                .flatMap(addressInfo -> {
                    // Seed the cache so subsequent requests resolve without a gateway round trip.
                    this.serverPartitionAddressCache.set(addressInfo.getLeft(), addressInfo.getRight());
                    if (this.openConnectionsHandler != null) {
                        return this.openConnectionsHandler.openConnections(
                            Arrays
                                .stream(addressInfo.getRight())
                                .map(addressInformation -> addressInformation.getPhysicalUri())
                                .collect(Collectors.toList()));
                    }
                    logger.info("OpenConnectionHandler is null, can not open connections");
                    return Flux.empty();
                });
        });
}
/**
 * Gateway address lookup wrapped in the standard backoff helper so transient throttling
 * during cache warm-up is retried per the client's throttling retry options.
 *
 * @param request the service request used for the gateway call.
 * @param collectionRid rid of the collection whose ranges are resolved.
 * @param partitionKeyRangeIds range ids to resolve in this call.
 * @param forceRefresh true to bypass gateway-side caching.
 * @return the resolved addresses.
 */
private Mono<List<Address>> getServerAddressesViaGatewayWithRetry(
    RxDocumentServiceRequest request,
    String collectionRid,
    List<String> partitionKeyRangeIds,
    boolean forceRefresh) {
    return BackoffRetryUtility.executeRetry(
        () -> this.getServerAddressesViaGatewayAsync(request, collectionRid, partitionKeyRangeIds, forceRefresh),
        new OpenConnectionAndInitCachesRetryPolicy(this.connectionPolicy.getThrottlingRetryOptions()));
}
/**
 * Returns true when fewer replica addresses were resolved than the system replication
 * policy's maximum replica set size, i.e. the replica set is incomplete.
 */
private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) {
    int resolvedReplicaCount = addressInformations.length;
    return resolvedReplicaCount < ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize;
}
/**
 * Records the start of an address resolution in the request's diagnostics, if present.
 *
 * @return an identifier to pass to {@code logAddressResolutionEnd}, or null when the request
 *         carries no diagnostics.
 */
private static String logAddressResolutionStart(
    RxDocumentServiceRequest request,
    URI targetEndpointUrl,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {
    // Nothing to record without diagnostics on the request.
    if (request.requestContext.cosmosDiagnostics == null) {
        return null;
    }
    return BridgeInternal.recordAddressResolutionStart(
        request.requestContext.cosmosDiagnostics,
        targetEndpointUrl,
        forceRefresh,
        forceCollectionRoutingMapRefresh);
}
/**
 * Records the end of an address resolution in the request's diagnostics, if present.
 *
 * @param identifier the value returned by {@code logAddressResolutionStart}.
 * @param errorMessage failure description, or null on success.
 */
private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier, String errorMessage) {
    // No-op unless the request carries diagnostics to record into.
    if (request.requestContext.cosmosDiagnostics == null) {
        return;
    }
    BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosDiagnostics, identifier, errorMessage);
}
/**
 * Per-collection bookkeeping of forced refreshes. Used to decide when repeated
 * address-only refreshes for a partition key range should be escalated into a
 * collection routing map refresh.
 */
private static class ForcedRefreshMetadata {
    // Last time an address-only refresh was forced, per partition key range.
    private final ConcurrentHashMap<PartitionKeyRangeIdentity, Instant> lastPartitionAddressOnlyRefresh;
    // Last time a collection routing map refresh was forced (for any range of the collection).
    private Instant lastCollectionRoutingMapRefresh;
    public ForcedRefreshMetadata() {
        lastPartitionAddressOnlyRefresh = new ConcurrentHashMap<>();
        // Initialized to "now" so escalation requires history after construction.
        lastCollectionRoutingMapRefresh = Instant.now();
    }
    /**
     * Records a routing map refresh; also records an address refresh for {@code pk} when
     * {@code forcePartitionAddressRefresh} is true.
     */
    public void signalCollectionRoutingMapRefresh(
        PartitionKeyRangeIdentity pk,
        boolean forcePartitionAddressRefresh) {
        Instant nowSnapshot = Instant.now();
        if (forcePartitionAddressRefresh) {
            lastPartitionAddressOnlyRefresh.put(pk, nowSnapshot);
        }
        lastCollectionRoutingMapRefresh = nowSnapshot;
    }
    // Records an address-only refresh for the given range.
    public void signalPartitionAddressOnlyRefresh(PartitionKeyRangeIdentity pk) {
        lastPartitionAddressOnlyRefresh.put(pk, Instant.now());
    }
    /**
     * True when an address-only refresh for {@code pk} happened after the last routing map
     * refresh AND at least minDurationBeforeEnforcingCollectionRoutingMapRefresh (a field of
     * the enclosing class) has elapsed since that routing map refresh.
     */
    public boolean shouldIncludeCollectionRoutingMapRefresh(PartitionKeyRangeIdentity pk) {
        // Snapshot both timestamps first; other threads may update them concurrently.
        Instant lastPartitionAddressRefreshSnapshot = lastPartitionAddressOnlyRefresh.get(pk);
        Instant lastCollectionRoutingMapRefreshSnapshot = lastCollectionRoutingMapRefresh;
        if (lastPartitionAddressRefreshSnapshot == null ||
            !lastPartitionAddressRefreshSnapshot.isAfter(lastCollectionRoutingMapRefreshSnapshot)) {
            return false;
        }
        Duration durationSinceLastForcedCollectionRoutingMapRefresh =
            Duration.between(lastCollectionRoutingMapRefreshSnapshot, Instant.now());
        boolean returnValue = durationSinceLastForcedCollectionRoutingMapRefresh
            .compareTo(minDurationBeforeEnforcingCollectionRoutingMapRefresh) >= 0;
        return returnValue;
    }
}
} |
Perhaps we just need `AbstractDownloadTest` and `AbstractUploadTest` ? | public Mono<Void> globalSetupAsync() {
Mono<Void> uploadMono;
if (this.createBlob && this.singletonBlob) {
uploadMono = blobAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), null)
.doFinally(x -> System.out.println("upload finished")).then();
} else {
uploadMono = Mono.empty();
}
return super.globalSetupAsync()
.then(uploadMono)
.then();
} | } | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then();
} | class BlobTestBase<TOptions extends BlobPerfStressOptions> extends ContainerTest<TOptions> {
// Buffer size used by copyStream.
public static final int DEFAULT_BUFFER_SIZE = 8192;
protected final BlobClient blobClient;
protected final BlockBlobClient blockBlobClient;
protected final BlobAsyncClient blobAsyncClient;
protected final BlockBlobAsyncClient blockBlobAsyncClient;
// Whether setup should pre-upload a blob for this test.
private final boolean createBlob;
// When true, all test instances share one fixed-name blob instead of a per-instance random one.
private final boolean singletonBlob;
/**
 * Creates sync/async clients for a test blob. When the options request client-side encryption
 * ("1.0" or "2.0"), the clients are wrapped in encrypted clients keyed by a random FakeKey
 * (throwaway perf-test key material, not for production use).
 *
 * @throws IllegalArgumentException when the encryption version string is not "1.0" or "2.0".
 */
public BlobTestBase(TOptions options, boolean createBlob, boolean singletonBlob) {
    super(options);
    // Singleton mode uses a fixed blob name so every instance targets the same blob.
    String blobName = "randomblobtest-" + (singletonBlob ? "" : UUID.randomUUID().toString());
    if (options.getClientEncryption() != null) {
        EncryptionVersion version;
        if (options.getClientEncryption().equals("1.0")) {
            version = EncryptionVersion.V1;
        } else if (options.getClientEncryption().equals("2.0")) {
            version = EncryptionVersion.V2;
        } else {
            throw new IllegalArgumentException("Encryption version not recognized");
        }
        Random rand = new Random(System.currentTimeMillis());
        byte[] data = new byte[256];
        rand.nextBytes(data);
        FakeKey key = new FakeKey("keyId", data);
        EncryptedBlobClientBuilder builder = new EncryptedBlobClientBuilder(version)
            .blobClient(blobContainerClient.getBlobClient(blobName))
            .key(key, KeyWrapAlgorithm.A256KW.toString());
        blobClient = builder.buildEncryptedBlobClient();
        blobAsyncClient = builder.buildEncryptedBlobAsyncClient();
    } else {
        blobClient = blobContainerClient.getBlobClient(blobName);
        blobAsyncClient = blobContainerAsyncClient.getBlobAsyncClient(blobName);
    }
    blockBlobClient = blobClient.getBlockBlobClient();
    blockBlobAsyncClient = blobAsyncClient.getBlockBlobAsyncClient();
    this.createBlob = createBlob;
    this.singletonBlob = singletonBlob;
}
/**
 * Per-test setup: pre-uploads a random blob of {@code options.getSize()} bytes when this test
 * was constructed with createBlob=true and is not sharing a singleton blob; otherwise only runs
 * the parent setup.
 * Fix: removed a duplicated {@code @Override} annotation — repeating a non-repeatable
 * annotation on one method is a compile error.
 */
@Override
public Mono<Void> setupAsync() {
    Mono<Void> uploadMono;
    if (this.createBlob && !this.singletonBlob) {
        uploadMono = blobAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), null)
            .doFinally(x -> System.out.println("upload finished")).then();
    } else {
        uploadMono = Mono.empty();
    }
    return super.setupAsync()
        .then(uploadMono)
        .then();
}
/**
 * Drains {@code input} into {@code out} using a DEFAULT_BUFFER_SIZE scratch buffer.
 *
 * @param input the stream to read from (not closed by this method).
 * @param out the stream to write to (not closed or flushed by this method).
 * @return the total number of bytes transferred.
 * @throws IOException if either stream fails.
 */
public long copyStream(InputStream input, OutputStream out) throws IOException {
    final byte[] chunk = new byte[DEFAULT_BUFFER_SIZE];
    long total = 0;
    for (int count = input.read(chunk, 0, DEFAULT_BUFFER_SIZE);
            count >= 0;
            count = input.read(chunk, 0, DEFAULT_BUFFER_SIZE)) {
        out.write(chunk, 0, count);
        total += count;
    }
    return total;
}
} | class BlobTestBase<TOptions extends BlobPerfStressOptions> extends ContainerTest<TOptions> {
// Buffer size used by copyStream.
public static final int DEFAULT_BUFFER_SIZE = 8192;
// Common prefix for generated test blob names.
protected static final String BLOB_NAME_PREFIX = "randomblobtest-";
protected final BlobClient blobClient;
protected final BlockBlobClient blockBlobClient;
protected final BlobAsyncClient blobAsyncClient;
protected final BlockBlobAsyncClient blockBlobAsyncClient;
// Shared throwaway key for client-side encryption tests. java.util.Random (not SecureRandom)
// is acceptable here because this is perf-test scaffolding, not production key material.
private static final FakeKey fakeKeyEncryptionKey;
static {
    Random rand = new Random(System.currentTimeMillis());
    byte[] data = new byte[256];
    rand.nextBytes(data);
    fakeKeyEncryptionKey = new FakeKey("keyId", data);
}
/**
 * Creates sync/async clients for the named test blob. When the options request client-side
 * encryption ("1.0" or "2.0"), the blob clients are wrapped in encrypted clients keyed by the
 * shared {@code fakeKeyEncryptionKey}; block-blob clients are always unencrypted.
 *
 * @param options perf-stress options for this test.
 * @param blobName name of the blob this test operates on.
 * @throws IllegalArgumentException when the encryption version string is not "1.0" or "2.0".
 */
public BlobTestBase(TOptions options, String blobName) {
    super(options);
    if (options.getClientEncryption() != null) {
        EncryptionVersion version;
        if (options.getClientEncryption().equals("1.0")) {
            version = EncryptionVersion.V1;
        } else if (options.getClientEncryption().equals("2.0")) {
            version = EncryptionVersion.V2;
        } else {
            throw new IllegalArgumentException("Encryption version not recognized");
        }
        EncryptedBlobClientBuilder builder = new EncryptedBlobClientBuilder(version)
            .blobClient(blobContainerClient.getBlobClient(blobName))
            .key(fakeKeyEncryptionKey, KeyWrapAlgorithm.A256KW.toString());
        blobClient = builder.buildEncryptedBlobClient();
        blobAsyncClient = builder.buildEncryptedBlobAsyncClient();
    } else {
        blobClient = blobContainerClient.getBlobClient(blobName);
        blobAsyncClient = blobContainerAsyncClient.getBlobAsyncClient(blobName);
    }
    // Block-blob clients bypass encryption on purpose: they come from the container clients,
    // not from the (possibly encrypted) blobClient/blobAsyncClient above.
    blockBlobClient = blobContainerClient.getBlobClient(blobName).getBlockBlobClient();
    blockBlobAsyncClient = blobContainerAsyncClient.getBlobAsyncClient(blobName).getBlockBlobAsyncClient();
}
/**
 * Per-test setup; delegates to the parent container setup.
 * Fix: removed a duplicated {@code @Override} annotation — repeating a non-repeatable
 * annotation on one method is a compile error.
 */
@Override
public Mono<Void> setupAsync() {
    return super.setupAsync()
        .then();
}
/**
 * Copies every byte from {@code input} to {@code out} through a DEFAULT_BUFFER_SIZE buffer.
 * Neither stream is closed or flushed by this method.
 *
 * @return the total number of bytes written to {@code out}.
 * @throws IOException if reading or writing fails.
 */
public long copyStream(InputStream input, OutputStream out) throws IOException {
    byte[] scratch = new byte[DEFAULT_BUFFER_SIZE];
    long written = 0;
    while (true) {
        int n = input.read(scratch, 0, DEFAULT_BUFFER_SIZE);
        if (n < 0) {
            break;
        }
        out.write(scratch, 0, n);
        written += n;
    }
    return written;
}
} |
mgmt uses this https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core-management/src/main/java/com/azure/core/management/serializer/SerializerFactory.java If you poll this, create serializerAdapter before the loop. | public Mono<CsmDeploymentStatus> getDeploymentStatusAsync(String deploymentId) {
AtomicLong pollCount = new AtomicLong();
Duration pollDuration = manager().serviceClient().getDefaultPollInterval();
return this.manager().serviceClient().getWebApps()
.getProductionSiteDeploymentStatusWithResponseAsync(this.resourceGroupName(), this.name(), deploymentId)
.flatMap(fluxResponse -> {
if (pollDuration.multipliedBy(pollCount.get()).compareTo(MAX_DEPLOYMENT_STATUS_TIMEOUT) < 0) {
HttpResponse response = new HttpFluxBBResponse(fluxResponse);
if (fluxResponse.getStatusCode() / 100 != 2) {
return Mono.error(new ManagementException("Service responds with a non-20x response.", response));
}
return response.getBodyAsString()
.flatMap(bodyString -> {
SerializerAdapter serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
CsmDeploymentStatus status;
try {
status = serializerAdapter.deserialize(bodyString, CsmDeploymentStatus.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.error(new ManagementException("Deserialize failed for response body.", response));
}
if (status == null) {
return Mono.empty();
}
return Mono.just(status);
});
} else {
return Mono.error(new ManagementException("Timeout getting deployment status for deploymentId: " + deploymentId, null));
}
}).repeatWhenEmpty(
longFlux ->
longFlux
.flatMap(
index -> {
pollCount.set(index);
return Mono.delay(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(pollDuration));
}));
} | SerializerAdapter serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); | public Mono<CsmDeploymentStatus> getDeploymentStatusAsync(String deploymentId) {
SerializerAdapter serializerAdapter = SerializerFactory.createDefaultManagementSerializerAdapter();
return this.manager().serviceClient().getWebApps()
.getProductionSiteDeploymentStatusWithResponseAsync(this.resourceGroupName(), this.name(), deploymentId)
.flatMap(fluxResponse -> {
HttpResponse response = new HttpFluxBBResponse(fluxResponse);
return response.getBodyAsString()
.flatMap(bodyString -> {
CsmDeploymentStatus status;
try {
status = serializerAdapter.deserialize(bodyString, CsmDeploymentStatus.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.error(new ManagementException("Deserialize failed for response body.", response));
}
return Mono.justOrEmpty(status);
});
});
} | class WebAppImpl extends AppServiceBaseImpl<WebApp, WebAppImpl, WebApp.DefinitionStages.WithCreate, WebApp.Update>
implements WebApp,
WebApp.Definition,
WebApp.DefinitionStages.ExistingWindowsPlanWithGroup,
WebApp.DefinitionStages.ExistingLinuxPlanWithGroup,
WebApp.Update,
WebApp.UpdateStages.WithCredentials,
WebApp.UpdateStages.WithStartUpCommand {
// Upper bound on deployment-status polling; presumably consumed by getDeploymentStatusAsync —
// TODO(review) confirm, that method is not visible in this view.
private static final Duration MAX_DEPLOYMENT_STATUS_TIMEOUT = Duration.ofMinutes(5);
// Lazily-created accessor over this app's deployment slots.
private DeploymentSlots deploymentSlots;
// Pending Windows runtime-stack change; applied in submitMetadata() and cleared afterwards.
private WebAppRuntimeStack runtimeStackOnWindowsOSToUpdate;
WebAppImpl(
    String name,
    SiteInner innerObject,
    SiteConfigResourceInner siteConfig,
    SiteLogsConfigInner logConfig,
    AppServiceManager manager) {
    super(name, innerObject, siteConfig, logConfig, manager);
}
@Override
public WebAppImpl update() {
    // Drop any pending runtime-stack change when a fresh update cycle begins.
    runtimeStackOnWindowsOSToUpdate = null;
    return super.update();
}
@Override
public DeploymentSlots deploymentSlots() {
    // Lazy init; NOTE(review): not thread-safe — concurrent callers could create two instances.
    if (deploymentSlots == null) {
        deploymentSlots = new DeploymentSlotsImpl(this);
    }
    return deploymentSlots;
}
@Override
public WebAppImpl withBuiltInImage(RuntimeStack runtimeStack) {
    // Built-in images require a Linux plan; clear any container settings first.
    ensureLinuxPlan();
    cleanUpContainerSettings();
    if (siteConfig == null) {
        siteConfig = new SiteConfigResourceInner();
    }
    siteConfig.withLinuxFxVersion(String.format("%s|%s", runtimeStack.stack(), runtimeStack.version()));
    // Some stacks additionally mirror the version into a dedicated site-config field.
    if (runtimeStack.stack().equals("NODE")) {
        siteConfig.withNodeVersion(runtimeStack.version());
    }
    if (runtimeStack.stack().equals("PHP")) {
        siteConfig.withPhpVersion(runtimeStack.version());
    }
    if (runtimeStack.stack().equals("DOTNETCORE")) {
        siteConfig.withNetFrameworkVersion(runtimeStack.version());
    }
    return this;
}
// Clears all container/runtime-specific site-config values and the docker registry app settings,
// so a new runtime or image can be applied from a clean slate.
@Override
protected void cleanUpContainerSettings() {
    if (siteConfig != null && siteConfig.linuxFxVersion() != null) {
        siteConfig.withLinuxFxVersion(null);
    }
    if (siteConfig != null && siteConfig.windowsFxVersion() != null) {
        siteConfig.withWindowsFxVersion(null);
    }
    if (siteConfig != null && siteConfig.phpVersion() != null) {
        siteConfig.withPhpVersion(null);
    }
    if (siteConfig != null && siteConfig.nodeVersion() != null) {
        siteConfig.withNodeVersion(null);
    }
    if (siteConfig != null && siteConfig.pythonVersion() != null) {
        siteConfig.withPythonVersion(null);
    }
    if (siteConfig != null && siteConfig.javaVersion() != null) {
        siteConfig.withJavaVersion(null);
    }
    if (siteConfig != null && siteConfig.netFrameworkVersion() != null) {
        // .NET framework version is reset to the default rather than cleared.
        siteConfig.withNetFrameworkVersion("v4.0");
    }
    withoutAppSetting(SETTING_DOCKER_IMAGE);
    withoutAppSetting(SETTING_REGISTRY_SERVER);
    withoutAppSetting(SETTING_REGISTRY_USERNAME);
    withoutAppSetting(SETTING_REGISTRY_PASSWORD);
}
@Override
public WebAppImpl withStartUpCommand(String startUpCommand) {
    if (siteConfig == null) {
        siteConfig = new SiteConfigResourceInner();
    }
    siteConfig.withAppCommandLine(startUpCommand);
    return this;
}
// The plan methods below are thin adapters over the base-class app-service-plan helpers;
// Windows/Linux variants differ only in the OperatingSystem constant passed through.
@Override
public WebAppImpl withExistingWindowsPlan(AppServicePlan appServicePlan) {
    return super.withExistingAppServicePlan(appServicePlan);
}
@Override
public WebAppImpl withExistingLinuxPlan(AppServicePlan appServicePlan) {
    return super.withExistingAppServicePlan(appServicePlan);
}
@Override
public WebAppImpl withNewWindowsPlan(PricingTier pricingTier) {
    return super.withNewAppServicePlan(OperatingSystem.WINDOWS, pricingTier);
}
@Override
public WebAppImpl withNewWindowsPlan(String appServicePlanName, PricingTier pricingTier) {
    return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.WINDOWS, pricingTier);
}
@Override
public WebAppImpl withNewWindowsPlan(Creatable<AppServicePlan> appServicePlanCreatable) {
    return super.withNewAppServicePlan(appServicePlanCreatable);
}
@Override
public WebAppImpl withNewLinuxPlan(PricingTier pricingTier) {
    return super.withNewAppServicePlan(OperatingSystem.LINUX, pricingTier);
}
@Override
public WebAppImpl withNewLinuxPlan(String appServicePlanName, PricingTier pricingTier) {
    return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.LINUX, pricingTier);
}
@Override
public WebAppImpl withNewLinuxPlan(Creatable<AppServicePlan> appServicePlanCreatable) {
    return super.withNewAppServicePlan(appServicePlanCreatable);
}
@Override
public WebAppImpl withRuntimeStack(WebAppRuntimeStack runtimeStack) {
    // Recorded only; the change is pushed to site metadata during submitMetadata().
    runtimeStackOnWindowsOSToUpdate = runtimeStack;
    return this;
}
// WAR/ZIP deployment family. Sync variants block on their async counterparts; File-based
// variants convert the checked IOException from kuduClient into an error Mono.
@Override
public Mono<Void> warDeployAsync(File warFile) {
    return warDeployAsync(warFile, null);
}
@Override
public void warDeploy(File warFile) {
    warDeployAsync(warFile).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length) {
    return warDeployAsync(warFile, length, null);
}
@Override
public void warDeploy(InputStream warFile, long length) {
    warDeployAsync(warFile, length).block();
}
@Override
public Mono<Void> warDeployAsync(File warFile, String appName) {
    try {
        return kuduClient.warDeployAsync(warFile, appName);
    } catch (IOException e) {
        // Surface file-access failures through the reactive chain instead of throwing.
        return Mono.error(e);
    }
}
@Override
public void warDeploy(File warFile, String appName) {
    warDeployAsync(warFile, appName).block();
}
@Override
public void warDeploy(InputStream warFile, long length, String appName) {
    warDeployAsync(warFile, length, appName).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length, String appName) {
    return kuduClient.warDeployAsync(warFile, length, appName);
}
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
    try {
        return kuduClient.zipDeployAsync(zipFile);
    } catch (IOException e) {
        return Mono.error(e);
    }
}
@Override
public void zipDeploy(File zipFile) {
    zipDeployAsync(zipFile).block();
}
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile, long length) {
    // Stream-based zip deploy restarts the app (stop then start) after the upload completes.
    return kuduClient.zipDeployAsync(zipFile, length)
        .then(WebAppImpl.this.stopAsync())
        .then(WebAppImpl.this.startAsync());
}
@Override
public void zipDeploy(InputStream zipFile, long length) {
    zipDeployAsync(zipFile, length).block();
}
/**
 * Extends the base metadata submission: when a Windows runtime-stack change is pending, the
 * site's metadata dictionary is fetched (or created), CURRENT_STACK is set to the requested
 * runtime, the metadata is written back, and the pending change is cleared.
 */
@Override
Mono<Indexable> submitMetadata() {
    Mono<Indexable> observable = super.submitMetadata();
    if (runtimeStackOnWindowsOSToUpdate != null) {
        observable =
            observable
                .then(listMetadata())
                // No existing metadata: start from an empty dictionary.
                .switchIfEmpty(Mono.just(new StringDictionaryInner()))
                .flatMap(
                    stringDictionaryInner -> {
                        if (stringDictionaryInner.properties() == null) {
                            stringDictionaryInner.withProperties(new HashMap<String, String>());
                        }
                        stringDictionaryInner
                            .properties()
                            .put("CURRENT_STACK", runtimeStackOnWindowsOSToUpdate.runtime());
                        return updateMetadata(stringDictionaryInner);
                    })
                .then(
                    Mono
                        .fromCallable(
                            () -> {
                                // Clear the pending change only after the update succeeded.
                                runtimeStackOnWindowsOSToUpdate = null;
                                return WebAppImpl.this;
                            }));
    }
    return observable;
}
// Fetches the site's metadata dictionary from the service.
Mono<StringDictionaryInner> listMetadata() {
    return this.manager().serviceClient().getWebApps().listMetadataAsync(resourceGroupName(), name());
}
// Writes the site's metadata dictionary back to the service.
Mono<StringDictionaryInner> updateMetadata(StringDictionaryInner inner) {
    return this.manager().serviceClient().getWebApps().updateMetadataAsync(resourceGroupName(), name(), inner);
}
// Generic deployment family (OneDeploy). Sync variants block on the async ones; a null
// DeployOptions argument is replaced with defaults before delegating to kuduClient.
@Override
public void deploy(DeployType type, File file) {
    deployAsync(type, file).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file) {
    return deployAsync(type, file, new DeployOptions());
}
@Override
public void deploy(DeployType type, File file, DeployOptions deployOptions) {
    deployAsync(type, file, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file, DeployOptions deployOptions) {
    Objects.requireNonNull(type);
    Objects.requireNonNull(file);
    // Tolerate a null options argument by substituting defaults.
    if (deployOptions == null) {
        deployOptions = new DeployOptions();
    }
    try {
        return kuduClient.deployAsync(type, file,
            deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
    } catch (IOException e) {
        // Surface file-access failures through the reactive chain instead of throwing.
        return Mono.error(e);
    }
}
@Override
public void deploy(DeployType type, InputStream file, long length) {
    deployAsync(type, file, length).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length) {
    return deployAsync(type, file, length, new DeployOptions());
}
@Override
public void deploy(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
    deployAsync(type, file, length, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
    Objects.requireNonNull(type);
    Objects.requireNonNull(file);
    if (deployOptions == null) {
        deployOptions = new DeployOptions();
    }
    return kuduClient.deployAsync(type, file, length,
        deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
}
@Override
public KuduDeploymentResult pushDeploy(DeployType type, File file, DeployOptions deployOptions) {
    return pushDeployAsync(type, file, deployOptions).block();
}
// Push deploy additionally forwards the trackDeployment option and returns the Kudu result,
// whose deployment id can be polled via getDeploymentStatus.
@Override
public Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, File file, DeployOptions deployOptions) {
    Objects.requireNonNull(type);
    Objects.requireNonNull(file);
    if (deployOptions == null) {
        deployOptions = new DeployOptions();
    }
    try {
        return kuduClient.pushDeployAsync(type, file,
            deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment(),
            deployOptions.trackDeployment());
    } catch (IOException e) {
        return Mono.error(e);
    }
}
@Override
public CsmDeploymentStatus getDeploymentStatus(String deploymentId) {
    // Blocks on the async variant (defined elsewhere in this class).
    return getDeploymentStatusAsync(deploymentId).block();
}
@Override
} | class WebAppImpl extends AppServiceBaseImpl<WebApp, WebAppImpl, WebApp.DefinitionStages.WithCreate, WebApp.Update>
implements WebApp,
WebApp.Definition,
WebApp.DefinitionStages.ExistingWindowsPlanWithGroup,
WebApp.DefinitionStages.ExistingLinuxPlanWithGroup,
WebApp.Update,
WebApp.UpdateStages.WithCredentials,
WebApp.UpdateStages.WithStartUpCommand {
private DeploymentSlots deploymentSlots;
private WebAppRuntimeStack runtimeStackOnWindowsOSToUpdate;
WebAppImpl(
String name,
SiteInner innerObject,
SiteConfigResourceInner siteConfig,
SiteLogsConfigInner logConfig,
AppServiceManager manager) {
super(name, innerObject, siteConfig, logConfig, manager);
}
@Override
public WebAppImpl update() {
runtimeStackOnWindowsOSToUpdate = null;
return super.update();
}
@Override
public DeploymentSlots deploymentSlots() {
if (deploymentSlots == null) {
deploymentSlots = new DeploymentSlotsImpl(this);
}
return deploymentSlots;
}
@Override
public WebAppImpl withBuiltInImage(RuntimeStack runtimeStack) {
ensureLinuxPlan();
cleanUpContainerSettings();
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
siteConfig.withLinuxFxVersion(String.format("%s|%s", runtimeStack.stack(), runtimeStack.version()));
if (runtimeStack.stack().equals("NODE")) {
siteConfig.withNodeVersion(runtimeStack.version());
}
if (runtimeStack.stack().equals("PHP")) {
siteConfig.withPhpVersion(runtimeStack.version());
}
if (runtimeStack.stack().equals("DOTNETCORE")) {
siteConfig.withNetFrameworkVersion(runtimeStack.version());
}
return this;
}
@Override
protected void cleanUpContainerSettings() {
if (siteConfig != null && siteConfig.linuxFxVersion() != null) {
siteConfig.withLinuxFxVersion(null);
}
if (siteConfig != null && siteConfig.windowsFxVersion() != null) {
siteConfig.withWindowsFxVersion(null);
}
if (siteConfig != null && siteConfig.phpVersion() != null) {
siteConfig.withPhpVersion(null);
}
if (siteConfig != null && siteConfig.nodeVersion() != null) {
siteConfig.withNodeVersion(null);
}
if (siteConfig != null && siteConfig.pythonVersion() != null) {
siteConfig.withPythonVersion(null);
}
if (siteConfig != null && siteConfig.javaVersion() != null) {
siteConfig.withJavaVersion(null);
}
if (siteConfig != null && siteConfig.netFrameworkVersion() != null) {
siteConfig.withNetFrameworkVersion("v4.0");
}
withoutAppSetting(SETTING_DOCKER_IMAGE);
withoutAppSetting(SETTING_REGISTRY_SERVER);
withoutAppSetting(SETTING_REGISTRY_USERNAME);
withoutAppSetting(SETTING_REGISTRY_PASSWORD);
}
@Override
public WebAppImpl withStartUpCommand(String startUpCommand) {
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
siteConfig.withAppCommandLine(startUpCommand);
return this;
}
@Override
public WebAppImpl withExistingWindowsPlan(AppServicePlan appServicePlan) {
return super.withExistingAppServicePlan(appServicePlan);
}
@Override
public WebAppImpl withExistingLinuxPlan(AppServicePlan appServicePlan) {
return super.withExistingAppServicePlan(appServicePlan);
}
@Override
public WebAppImpl withNewWindowsPlan(PricingTier pricingTier) {
return super.withNewAppServicePlan(OperatingSystem.WINDOWS, pricingTier);
}
@Override
public WebAppImpl withNewWindowsPlan(String appServicePlanName, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.WINDOWS, pricingTier);
}
@Override
public WebAppImpl withNewWindowsPlan(Creatable<AppServicePlan> appServicePlanCreatable) {
return super.withNewAppServicePlan(appServicePlanCreatable);
}
@Override
public WebAppImpl withNewLinuxPlan(PricingTier pricingTier) {
return super.withNewAppServicePlan(OperatingSystem.LINUX, pricingTier);
}
@Override
public WebAppImpl withNewLinuxPlan(String appServicePlanName, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.LINUX, pricingTier);
}
@Override
public WebAppImpl withNewLinuxPlan(Creatable<AppServicePlan> appServicePlanCreatable) {
return super.withNewAppServicePlan(appServicePlanCreatable);
}
@Override
public WebAppImpl withRuntimeStack(WebAppRuntimeStack runtimeStack) {
runtimeStackOnWindowsOSToUpdate = runtimeStack;
return this;
}
@Override
public Mono<Void> warDeployAsync(File warFile) {
return warDeployAsync(warFile, null);
}
@Override
public void warDeploy(File warFile) {
warDeployAsync(warFile).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length) {
return warDeployAsync(warFile, length, null);
}
@Override
public void warDeploy(InputStream warFile, long length) {
warDeployAsync(warFile, length).block();
}
@Override
public Mono<Void> warDeployAsync(File warFile, String appName) {
try {
return kuduClient.warDeployAsync(warFile, appName);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void warDeploy(File warFile, String appName) {
warDeployAsync(warFile, appName).block();
}
@Override
public void warDeploy(InputStream warFile, long length, String appName) {
warDeployAsync(warFile, length, appName).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length, String appName) {
return kuduClient.warDeployAsync(warFile, length, appName);
}
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
try {
return kuduClient.zipDeployAsync(zipFile);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void zipDeploy(File zipFile) {
zipDeployAsync(zipFile).block();
}
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile, long length) {
return kuduClient.zipDeployAsync(zipFile, length)
.then(WebAppImpl.this.stopAsync())
.then(WebAppImpl.this.startAsync());
}
@Override
public void zipDeploy(InputStream zipFile, long length) {
zipDeployAsync(zipFile, length).block();
}
@Override
Mono<Indexable> submitMetadata() {
Mono<Indexable> observable = super.submitMetadata();
if (runtimeStackOnWindowsOSToUpdate != null) {
observable =
observable
.then(listMetadata())
.switchIfEmpty(Mono.just(new StringDictionaryInner()))
.flatMap(
stringDictionaryInner -> {
if (stringDictionaryInner.properties() == null) {
stringDictionaryInner.withProperties(new HashMap<String, String>());
}
stringDictionaryInner
.properties()
.put("CURRENT_STACK", runtimeStackOnWindowsOSToUpdate.runtime());
return updateMetadata(stringDictionaryInner);
})
.then(
Mono
.fromCallable(
() -> {
runtimeStackOnWindowsOSToUpdate = null;
return WebAppImpl.this;
}));
}
return observable;
}
Mono<StringDictionaryInner> listMetadata() {
return this.manager().serviceClient().getWebApps().listMetadataAsync(resourceGroupName(), name());
}
Mono<StringDictionaryInner> updateMetadata(StringDictionaryInner inner) {
return this.manager().serviceClient().getWebApps().updateMetadataAsync(resourceGroupName(), name(), inner);
}
@Override
public void deploy(DeployType type, File file) {
deployAsync(type, file).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file) {
return deployAsync(type, file, new DeployOptions());
}
@Override
public void deploy(DeployType type, File file, DeployOptions deployOptions) {
deployAsync(type, file, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.deployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void deploy(DeployType type, InputStream file, long length) {
deployAsync(type, file, length).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length) {
return deployAsync(type, file, length, new DeployOptions());
}
@Override
public void deploy(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
deployAsync(type, file, length, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
return kuduClient.deployAsync(type, file, length,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
}
@Override
public KuduDeploymentResult pushDeploy(DeployType type, File file, DeployOptions deployOptions) {
return pushDeployAsync(type, file, deployOptions).block();
}
@Override
public Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.pushDeployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment(),
deployOptions.trackDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public CsmDeploymentStatus getDeploymentStatus(String deploymentId) {
return getDeploymentStatusAsync(deploymentId).block();
}
@Override
} |
nit, Mono.justOrEmpty | public Mono<CsmDeploymentStatus> getDeploymentStatusAsync(String deploymentId) {
AtomicLong pollCount = new AtomicLong();
Duration pollDuration = manager().serviceClient().getDefaultPollInterval();
return this.manager().serviceClient().getWebApps()
.getProductionSiteDeploymentStatusWithResponseAsync(this.resourceGroupName(), this.name(), deploymentId)
.flatMap(fluxResponse -> {
if (pollDuration.multipliedBy(pollCount.get()).compareTo(MAX_DEPLOYMENT_STATUS_TIMEOUT) < 0) {
HttpResponse response = new HttpFluxBBResponse(fluxResponse);
if (fluxResponse.getStatusCode() / 100 != 2) {
return Mono.error(new ManagementException("Service responds with a non-20x response.", response));
}
return response.getBodyAsString()
.flatMap(bodyString -> {
SerializerAdapter serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
CsmDeploymentStatus status;
try {
status = serializerAdapter.deserialize(bodyString, CsmDeploymentStatus.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.error(new ManagementException("Deserialize failed for response body.", response));
}
if (status == null) {
return Mono.empty();
}
return Mono.just(status);
});
} else {
return Mono.error(new ManagementException("Timeout getting deployment status for deploymentId: " + deploymentId, null));
}
}).repeatWhenEmpty(
longFlux ->
longFlux
.flatMap(
index -> {
pollCount.set(index);
return Mono.delay(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(pollDuration));
}));
} | return Mono.just(status); | public Mono<CsmDeploymentStatus> getDeploymentStatusAsync(String deploymentId) {
SerializerAdapter serializerAdapter = SerializerFactory.createDefaultManagementSerializerAdapter();
return this.manager().serviceClient().getWebApps()
.getProductionSiteDeploymentStatusWithResponseAsync(this.resourceGroupName(), this.name(), deploymentId)
.flatMap(fluxResponse -> {
HttpResponse response = new HttpFluxBBResponse(fluxResponse);
return response.getBodyAsString()
.flatMap(bodyString -> {
CsmDeploymentStatus status;
try {
status = serializerAdapter.deserialize(bodyString, CsmDeploymentStatus.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.error(new ManagementException("Deserialize failed for response body.", response));
}
return Mono.justOrEmpty(status);
});
});
} | class WebAppImpl extends AppServiceBaseImpl<WebApp, WebAppImpl, WebApp.DefinitionStages.WithCreate, WebApp.Update>
implements WebApp,
WebApp.Definition,
WebApp.DefinitionStages.ExistingWindowsPlanWithGroup,
WebApp.DefinitionStages.ExistingLinuxPlanWithGroup,
WebApp.Update,
WebApp.UpdateStages.WithCredentials,
WebApp.UpdateStages.WithStartUpCommand {
private static final Duration MAX_DEPLOYMENT_STATUS_TIMEOUT = Duration.ofMinutes(5);
private DeploymentSlots deploymentSlots;
private WebAppRuntimeStack runtimeStackOnWindowsOSToUpdate;
WebAppImpl(
String name,
SiteInner innerObject,
SiteConfigResourceInner siteConfig,
SiteLogsConfigInner logConfig,
AppServiceManager manager) {
super(name, innerObject, siteConfig, logConfig, manager);
}
@Override
public WebAppImpl update() {
runtimeStackOnWindowsOSToUpdate = null;
return super.update();
}
@Override
public DeploymentSlots deploymentSlots() {
if (deploymentSlots == null) {
deploymentSlots = new DeploymentSlotsImpl(this);
}
return deploymentSlots;
}
@Override
public WebAppImpl withBuiltInImage(RuntimeStack runtimeStack) {
ensureLinuxPlan();
cleanUpContainerSettings();
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
siteConfig.withLinuxFxVersion(String.format("%s|%s", runtimeStack.stack(), runtimeStack.version()));
if (runtimeStack.stack().equals("NODE")) {
siteConfig.withNodeVersion(runtimeStack.version());
}
if (runtimeStack.stack().equals("PHP")) {
siteConfig.withPhpVersion(runtimeStack.version());
}
if (runtimeStack.stack().equals("DOTNETCORE")) {
siteConfig.withNetFrameworkVersion(runtimeStack.version());
}
return this;
}
@Override
protected void cleanUpContainerSettings() {
if (siteConfig != null && siteConfig.linuxFxVersion() != null) {
siteConfig.withLinuxFxVersion(null);
}
if (siteConfig != null && siteConfig.windowsFxVersion() != null) {
siteConfig.withWindowsFxVersion(null);
}
if (siteConfig != null && siteConfig.phpVersion() != null) {
siteConfig.withPhpVersion(null);
}
if (siteConfig != null && siteConfig.nodeVersion() != null) {
siteConfig.withNodeVersion(null);
}
if (siteConfig != null && siteConfig.pythonVersion() != null) {
siteConfig.withPythonVersion(null);
}
if (siteConfig != null && siteConfig.javaVersion() != null) {
siteConfig.withJavaVersion(null);
}
if (siteConfig != null && siteConfig.netFrameworkVersion() != null) {
siteConfig.withNetFrameworkVersion("v4.0");
}
withoutAppSetting(SETTING_DOCKER_IMAGE);
withoutAppSetting(SETTING_REGISTRY_SERVER);
withoutAppSetting(SETTING_REGISTRY_USERNAME);
withoutAppSetting(SETTING_REGISTRY_PASSWORD);
}
@Override
public WebAppImpl withStartUpCommand(String startUpCommand) {
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
siteConfig.withAppCommandLine(startUpCommand);
return this;
}
@Override
public WebAppImpl withExistingWindowsPlan(AppServicePlan appServicePlan) {
return super.withExistingAppServicePlan(appServicePlan);
}
@Override
public WebAppImpl withExistingLinuxPlan(AppServicePlan appServicePlan) {
return super.withExistingAppServicePlan(appServicePlan);
}
@Override
public WebAppImpl withNewWindowsPlan(PricingTier pricingTier) {
return super.withNewAppServicePlan(OperatingSystem.WINDOWS, pricingTier);
}
@Override
public WebAppImpl withNewWindowsPlan(String appServicePlanName, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.WINDOWS, pricingTier);
}
@Override
public WebAppImpl withNewWindowsPlan(Creatable<AppServicePlan> appServicePlanCreatable) {
return super.withNewAppServicePlan(appServicePlanCreatable);
}
@Override
public WebAppImpl withNewLinuxPlan(PricingTier pricingTier) {
return super.withNewAppServicePlan(OperatingSystem.LINUX, pricingTier);
}
@Override
public WebAppImpl withNewLinuxPlan(String appServicePlanName, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.LINUX, pricingTier);
}
@Override
public WebAppImpl withNewLinuxPlan(Creatable<AppServicePlan> appServicePlanCreatable) {
return super.withNewAppServicePlan(appServicePlanCreatable);
}
@Override
public WebAppImpl withRuntimeStack(WebAppRuntimeStack runtimeStack) {
runtimeStackOnWindowsOSToUpdate = runtimeStack;
return this;
}
@Override
public Mono<Void> warDeployAsync(File warFile) {
return warDeployAsync(warFile, null);
}
@Override
public void warDeploy(File warFile) {
warDeployAsync(warFile).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length) {
return warDeployAsync(warFile, length, null);
}
@Override
public void warDeploy(InputStream warFile, long length) {
warDeployAsync(warFile, length).block();
}
@Override
public Mono<Void> warDeployAsync(File warFile, String appName) {
try {
return kuduClient.warDeployAsync(warFile, appName);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void warDeploy(File warFile, String appName) {
warDeployAsync(warFile, appName).block();
}
@Override
public void warDeploy(InputStream warFile, long length, String appName) {
warDeployAsync(warFile, length, appName).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length, String appName) {
return kuduClient.warDeployAsync(warFile, length, appName);
}
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
try {
return kuduClient.zipDeployAsync(zipFile);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void zipDeploy(File zipFile) {
zipDeployAsync(zipFile).block();
}
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile, long length) {
return kuduClient.zipDeployAsync(zipFile, length)
.then(WebAppImpl.this.stopAsync())
.then(WebAppImpl.this.startAsync());
}
@Override
public void zipDeploy(InputStream zipFile, long length) {
zipDeployAsync(zipFile, length).block();
}
@Override
Mono<Indexable> submitMetadata() {
Mono<Indexable> observable = super.submitMetadata();
if (runtimeStackOnWindowsOSToUpdate != null) {
observable =
observable
.then(listMetadata())
.switchIfEmpty(Mono.just(new StringDictionaryInner()))
.flatMap(
stringDictionaryInner -> {
if (stringDictionaryInner.properties() == null) {
stringDictionaryInner.withProperties(new HashMap<String, String>());
}
stringDictionaryInner
.properties()
.put("CURRENT_STACK", runtimeStackOnWindowsOSToUpdate.runtime());
return updateMetadata(stringDictionaryInner);
})
.then(
Mono
.fromCallable(
() -> {
runtimeStackOnWindowsOSToUpdate = null;
return WebAppImpl.this;
}));
}
return observable;
}
Mono<StringDictionaryInner> listMetadata() {
return this.manager().serviceClient().getWebApps().listMetadataAsync(resourceGroupName(), name());
}
Mono<StringDictionaryInner> updateMetadata(StringDictionaryInner inner) {
return this.manager().serviceClient().getWebApps().updateMetadataAsync(resourceGroupName(), name(), inner);
}
@Override
public void deploy(DeployType type, File file) {
deployAsync(type, file).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file) {
return deployAsync(type, file, new DeployOptions());
}
@Override
public void deploy(DeployType type, File file, DeployOptions deployOptions) {
deployAsync(type, file, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.deployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void deploy(DeployType type, InputStream file, long length) {
deployAsync(type, file, length).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length) {
return deployAsync(type, file, length, new DeployOptions());
}
@Override
public void deploy(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
deployAsync(type, file, length, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
return kuduClient.deployAsync(type, file, length,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
}
@Override
public KuduDeploymentResult pushDeploy(DeployType type, File file, DeployOptions deployOptions) {
return pushDeployAsync(type, file, deployOptions).block();
}
@Override
public Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.pushDeployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment(),
deployOptions.trackDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public CsmDeploymentStatus getDeploymentStatus(String deploymentId) {
return getDeploymentStatusAsync(deploymentId).block();
}
@Override
} | class WebAppImpl extends AppServiceBaseImpl<WebApp, WebAppImpl, WebApp.DefinitionStages.WithCreate, WebApp.Update>
implements WebApp,
WebApp.Definition,
WebApp.DefinitionStages.ExistingWindowsPlanWithGroup,
WebApp.DefinitionStages.ExistingLinuxPlanWithGroup,
WebApp.Update,
WebApp.UpdateStages.WithCredentials,
WebApp.UpdateStages.WithStartUpCommand {
private DeploymentSlots deploymentSlots;
private WebAppRuntimeStack runtimeStackOnWindowsOSToUpdate;
WebAppImpl(
String name,
SiteInner innerObject,
SiteConfigResourceInner siteConfig,
SiteLogsConfigInner logConfig,
AppServiceManager manager) {
super(name, innerObject, siteConfig, logConfig, manager);
}
@Override
public WebAppImpl update() {
runtimeStackOnWindowsOSToUpdate = null;
return super.update();
}
@Override
public DeploymentSlots deploymentSlots() {
if (deploymentSlots == null) {
deploymentSlots = new DeploymentSlotsImpl(this);
}
return deploymentSlots;
}
@Override
public WebAppImpl withBuiltInImage(RuntimeStack runtimeStack) {
ensureLinuxPlan();
cleanUpContainerSettings();
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
siteConfig.withLinuxFxVersion(String.format("%s|%s", runtimeStack.stack(), runtimeStack.version()));
if (runtimeStack.stack().equals("NODE")) {
siteConfig.withNodeVersion(runtimeStack.version());
}
if (runtimeStack.stack().equals("PHP")) {
siteConfig.withPhpVersion(runtimeStack.version());
}
if (runtimeStack.stack().equals("DOTNETCORE")) {
siteConfig.withNetFrameworkVersion(runtimeStack.version());
}
return this;
}
@Override
protected void cleanUpContainerSettings() {
if (siteConfig != null && siteConfig.linuxFxVersion() != null) {
siteConfig.withLinuxFxVersion(null);
}
if (siteConfig != null && siteConfig.windowsFxVersion() != null) {
siteConfig.withWindowsFxVersion(null);
}
if (siteConfig != null && siteConfig.phpVersion() != null) {
siteConfig.withPhpVersion(null);
}
if (siteConfig != null && siteConfig.nodeVersion() != null) {
siteConfig.withNodeVersion(null);
}
if (siteConfig != null && siteConfig.pythonVersion() != null) {
siteConfig.withPythonVersion(null);
}
if (siteConfig != null && siteConfig.javaVersion() != null) {
siteConfig.withJavaVersion(null);
}
if (siteConfig != null && siteConfig.netFrameworkVersion() != null) {
siteConfig.withNetFrameworkVersion("v4.0");
}
withoutAppSetting(SETTING_DOCKER_IMAGE);
withoutAppSetting(SETTING_REGISTRY_SERVER);
withoutAppSetting(SETTING_REGISTRY_USERNAME);
withoutAppSetting(SETTING_REGISTRY_PASSWORD);
}
@Override
public WebAppImpl withStartUpCommand(String startUpCommand) {
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
siteConfig.withAppCommandLine(startUpCommand);
return this;
}
@Override
public WebAppImpl withExistingWindowsPlan(AppServicePlan appServicePlan) {
return super.withExistingAppServicePlan(appServicePlan);
}
@Override
public WebAppImpl withExistingLinuxPlan(AppServicePlan appServicePlan) {
return super.withExistingAppServicePlan(appServicePlan);
}
@Override
public WebAppImpl withNewWindowsPlan(PricingTier pricingTier) {
return super.withNewAppServicePlan(OperatingSystem.WINDOWS, pricingTier);
}
@Override
public WebAppImpl withNewWindowsPlan(String appServicePlanName, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.WINDOWS, pricingTier);
}
@Override
public WebAppImpl withNewWindowsPlan(Creatable<AppServicePlan> appServicePlanCreatable) {
return super.withNewAppServicePlan(appServicePlanCreatable);
}
@Override
public WebAppImpl withNewLinuxPlan(PricingTier pricingTier) {
return super.withNewAppServicePlan(OperatingSystem.LINUX, pricingTier);
}
@Override
public WebAppImpl withNewLinuxPlan(String appServicePlanName, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.LINUX, pricingTier);
}
@Override
public WebAppImpl withNewLinuxPlan(Creatable<AppServicePlan> appServicePlanCreatable) {
return super.withNewAppServicePlan(appServicePlanCreatable);
}
@Override
public WebAppImpl withRuntimeStack(WebAppRuntimeStack runtimeStack) {
runtimeStackOnWindowsOSToUpdate = runtimeStack;
return this;
}
@Override
public Mono<Void> warDeployAsync(File warFile) {
return warDeployAsync(warFile, null);
}
@Override
public void warDeploy(File warFile) {
warDeployAsync(warFile).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length) {
return warDeployAsync(warFile, length, null);
}
@Override
public void warDeploy(InputStream warFile, long length) {
warDeployAsync(warFile, length).block();
}
@Override
public Mono<Void> warDeployAsync(File warFile, String appName) {
try {
return kuduClient.warDeployAsync(warFile, appName);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void warDeploy(File warFile, String appName) {
warDeployAsync(warFile, appName).block();
}
@Override
public void warDeploy(InputStream warFile, long length, String appName) {
warDeployAsync(warFile, length, appName).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length, String appName) {
return kuduClient.warDeployAsync(warFile, length, appName);
}
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
try {
return kuduClient.zipDeployAsync(zipFile);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void zipDeploy(File zipFile) {
zipDeployAsync(zipFile).block();
}
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile, long length) {
return kuduClient.zipDeployAsync(zipFile, length)
.then(WebAppImpl.this.stopAsync())
.then(WebAppImpl.this.startAsync());
}
@Override
public void zipDeploy(InputStream zipFile, long length) {
zipDeployAsync(zipFile, length).block();
}
@Override
Mono<Indexable> submitMetadata() {
Mono<Indexable> observable = super.submitMetadata();
if (runtimeStackOnWindowsOSToUpdate != null) {
observable =
observable
.then(listMetadata())
.switchIfEmpty(Mono.just(new StringDictionaryInner()))
.flatMap(
stringDictionaryInner -> {
if (stringDictionaryInner.properties() == null) {
stringDictionaryInner.withProperties(new HashMap<String, String>());
}
stringDictionaryInner
.properties()
.put("CURRENT_STACK", runtimeStackOnWindowsOSToUpdate.runtime());
return updateMetadata(stringDictionaryInner);
})
.then(
Mono
.fromCallable(
() -> {
runtimeStackOnWindowsOSToUpdate = null;
return WebAppImpl.this;
}));
}
return observable;
}
Mono<StringDictionaryInner> listMetadata() {
return this.manager().serviceClient().getWebApps().listMetadataAsync(resourceGroupName(), name());
}
Mono<StringDictionaryInner> updateMetadata(StringDictionaryInner inner) {
return this.manager().serviceClient().getWebApps().updateMetadataAsync(resourceGroupName(), name(), inner);
}
@Override
public void deploy(DeployType type, File file) {
deployAsync(type, file).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file) {
return deployAsync(type, file, new DeployOptions());
}
@Override
public void deploy(DeployType type, File file, DeployOptions deployOptions) {
deployAsync(type, file, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.deployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void deploy(DeployType type, InputStream file, long length) {
deployAsync(type, file, length).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length) {
return deployAsync(type, file, length, new DeployOptions());
}
@Override
public void deploy(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
deployAsync(type, file, length, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
return kuduClient.deployAsync(type, file, length,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
}
@Override
public KuduDeploymentResult pushDeploy(DeployType type, File file, DeployOptions deployOptions) {
return pushDeployAsync(type, file, deployOptions).block();
}
@Override
public Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.pushDeployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment(),
deployOptions.trackDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public CsmDeploymentStatus getDeploymentStatus(String deploymentId) {
return getDeploymentStatusAsync(deploymentId).block();
}
@Override
} |
wrong API, it should be the "slot" one. | public Mono<CsmDeploymentStatus> getDeploymentStatusAsync(String deploymentId) {
SerializerAdapter serializerAdapter = SerializerFactory.createDefaultManagementSerializerAdapter();
return this.manager().serviceClient().getWebApps()
.getProductionSiteDeploymentStatusWithResponseAsync(this.resourceGroupName(), this.name(), deploymentId)
.flatMap(fluxResponse -> {
HttpResponse response = new HttpFluxBBResponse(fluxResponse);
if (fluxResponse.getStatusCode() / 100 != 2) {
return Mono.error(new ManagementException("Service responds with a non-20x response.", response));
}
return response.getBodyAsString()
.flatMap(bodyString -> {
CsmDeploymentStatus status;
try {
status = serializerAdapter.deserialize(bodyString, CsmDeploymentStatus.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.error(new ManagementException("Deserialize failed for response body.", response));
}
return Mono.justOrEmpty(status);
});
});
} | .getProductionSiteDeploymentStatusWithResponseAsync(this.resourceGroupName(), this.name(), deploymentId) | public Mono<CsmDeploymentStatus> getDeploymentStatusAsync(String deploymentId) {
SerializerAdapter serializerAdapter = SerializerFactory.createDefaultManagementSerializerAdapter();
return this.manager().serviceClient().getWebApps()
.getSlotSiteDeploymentStatusSlotWithResponseAsync(this.resourceGroupName(), this.parent().name(), this.name(), deploymentId)
.flatMap(fluxResponse -> {
HttpResponse response = new HttpFluxBBResponse(fluxResponse);
return response.getBodyAsString()
.flatMap(bodyString -> {
CsmDeploymentStatus status;
try {
status = serializerAdapter.deserialize(bodyString, CsmDeploymentStatus.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.error(new ManagementException("Deserialize failed for response body.", response));
}
return Mono.justOrEmpty(status);
});
});
} | class DeploymentSlotImpl
extends DeploymentSlotBaseImpl<
DeploymentSlot,
DeploymentSlotImpl,
WebAppImpl,
DeploymentSlot.DefinitionStages.WithCreate,
DeploymentSlotBase.Update<DeploymentSlot>>
implements DeploymentSlot, DeploymentSlot.Definition {
private static final Duration MAX_DEPLOYMENT_STATUS_TIMEOUT = Duration.ofMinutes(5);
DeploymentSlotImpl(
String name,
SiteInner innerObject,
SiteConfigResourceInner siteConfig,
SiteLogsConfigInner logConfig,
WebAppImpl parent) {
super(name, innerObject, siteConfig, logConfig, parent);
}
@Override
public DeploymentSlotImpl withConfigurationFromParent() {
return withConfigurationFromWebApp(this.parent());
}
@Override
public DeploymentSlotImpl withConfigurationFromWebApp(WebApp webApp) {
this.siteConfig = ((WebAppBaseImpl) webApp).siteConfig;
configurationSource = webApp;
return this;
}
@Override
// Async WAR deploy to the default app; delegates with a null app name.
public Mono<Void> warDeployAsync(File warFile) {
return warDeployAsync(warFile, null);
}
@Override
// Blocking wrapper over warDeployAsync(File).
public void warDeploy(File warFile) {
warDeployAsync(warFile).block();
}
@Override
// Async WAR deploy from a stream of known length; delegates with a null app name.
public Mono<Void> warDeployAsync(InputStream warFile, long length) {
return warDeployAsync(warFile, length, null);
}
@Override
// Blocking wrapper over warDeployAsync(InputStream, long).
public void warDeploy(InputStream warFile, long length) {
warDeployAsync(warFile, length).block();
}
@Override
// Async WAR deploy via Kudu; the checked IOException from reading the file is
// surfaced as an error Mono instead of being thrown synchronously.
public Mono<Void> warDeployAsync(File warFile, String appName) {
try {
return kuduClient.warDeployAsync(warFile, appName);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
// Blocking wrapper over warDeployAsync(File, String).
public void warDeploy(File warFile, String appName) {
warDeployAsync(warFile, appName).block();
}
@Override
// Async WAR deploy of a stream via Kudu; no IOException handling needed here since
// the stream-based Kudu call does not declare one.
public Mono<Void> warDeployAsync(InputStream warFile, long length, String appName) {
return kuduClient.warDeployAsync(warFile, length, appName);
}
@Override
// Blocking wrapper over warDeployAsync(InputStream, long, String).
public void warDeploy(InputStream warFile, long length, String appName) {
warDeployAsync(warFile, length, appName).block();
}
@Override
// Blocking wrapper over zipDeployAsync(File).
public void zipDeploy(File zipFile) {
zipDeployAsync(zipFile).block();
}
@Override
// Blocking wrapper over zipDeployAsync(InputStream, long).
public void zipDeploy(InputStream zipFile, long length) {
zipDeployAsync(zipFile, length).block();
}
@Override
// Async ZIP deploy from a stream, then stop and restart the slot so the new content
// is picked up. NOTE(review): the File overload below does NOT do the stop/start
// cycle — confirm whether this asymmetry is intentional.
public Mono<Void> zipDeployAsync(InputStream zipFile, long length) {
return kuduClient.zipDeployAsync(zipFile, length).then(stopAsync()).then(startAsync());
}
@Override
// Async ZIP deploy of a file via Kudu; the checked IOException is surfaced as an
// error Mono instead of being thrown synchronously.
public Mono<Void> zipDeployAsync(File zipFile) {
try {
return kuduClient.zipDeployAsync(zipFile);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
// Blocking wrapper over deployAsync(type, file).
public void deploy(DeployType type, File file) {
deployAsync(type, file).block();
}
@Override
// Async deploy with default options.
public Mono<Void> deployAsync(DeployType type, File file) {
return deployAsync(type, file, new DeployOptions());
}
@Override
// Blocking wrapper over deployAsync(type, file, deployOptions).
public void deploy(DeployType type, File file, DeployOptions deployOptions) {
deployAsync(type, file, deployOptions).block();
}
@Override
// Async file deploy via Kudu. type and file are required; a null options object is
// normalized to defaults. The checked IOException from the file-based Kudu call is
// surfaced as an error Mono.
public Mono<Void> deployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.deployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
// Blocking wrapper over deployAsync(type, file, length).
public void deploy(DeployType type, InputStream file, long length) {
deployAsync(type, file, length).block();
}
@Override
// Async stream deploy with default options.
public Mono<Void> deployAsync(DeployType type, InputStream file, long length) {
return deployAsync(type, file, length, new DeployOptions());
}
@Override
// Blocking wrapper over deployAsync(type, file, length, deployOptions).
public void deploy(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
deployAsync(type, file, length, deployOptions).block();
}
@Override
// Async stream deploy via Kudu. Mirrors the file overload, but the stream-based Kudu
// call declares no IOException, so no try/catch is needed.
public Mono<Void> deployAsync(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
return kuduClient.deployAsync(type, file, length,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
}
@Override
// Blocking wrapper over pushDeployAsync; returns the Kudu deployment result.
public KuduDeploymentResult pushDeploy(DeployType type, File file, DeployOptions deployOptions) {
return pushDeployAsync(type, file, deployOptions).block();
}
@Override
// Async "push" deploy via Kudu that also requests deployment tracking
// (deployOptions.trackDeployment()). type and file are required; a null options
// object is normalized to defaults; the checked IOException is surfaced as an
// error Mono instead of being thrown synchronously.
public Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.pushDeployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment(),
deployOptions.trackDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
// Blocking wrapper over getDeploymentStatusAsync.
public CsmDeploymentStatus getDeploymentStatus(String deploymentId) {
return getDeploymentStatusAsync(deploymentId).block();
}
@Override
} | class DeploymentSlotImpl
extends DeploymentSlotBaseImpl<
DeploymentSlot,
DeploymentSlotImpl,
WebAppImpl,
DeploymentSlot.DefinitionStages.WithCreate,
DeploymentSlotBase.Update<DeploymentSlot>>
implements DeploymentSlot, DeploymentSlot.Definition {
DeploymentSlotImpl(
String name,
SiteInner innerObject,
SiteConfigResourceInner siteConfig,
SiteLogsConfigInner logConfig,
WebAppImpl parent) {
super(name, innerObject, siteConfig, logConfig, parent);
}
@Override
public DeploymentSlotImpl withConfigurationFromParent() {
return withConfigurationFromWebApp(this.parent());
}
@Override
public DeploymentSlotImpl withConfigurationFromWebApp(WebApp webApp) {
this.siteConfig = ((WebAppBaseImpl) webApp).siteConfig;
configurationSource = webApp;
return this;
}
@Override
public Mono<Void> warDeployAsync(File warFile) {
return warDeployAsync(warFile, null);
}
@Override
public void warDeploy(File warFile) {
warDeployAsync(warFile).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length) {
return warDeployAsync(warFile, length, null);
}
@Override
public void warDeploy(InputStream warFile, long length) {
warDeployAsync(warFile, length).block();
}
@Override
public Mono<Void> warDeployAsync(File warFile, String appName) {
try {
return kuduClient.warDeployAsync(warFile, appName);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void warDeploy(File warFile, String appName) {
warDeployAsync(warFile, appName).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length, String appName) {
return kuduClient.warDeployAsync(warFile, length, appName);
}
@Override
public void warDeploy(InputStream warFile, long length, String appName) {
warDeployAsync(warFile, length, appName).block();
}
@Override
public void zipDeploy(File zipFile) {
zipDeployAsync(zipFile).block();
}
@Override
public void zipDeploy(InputStream zipFile, long length) {
zipDeployAsync(zipFile, length).block();
}
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile, long length) {
return kuduClient.zipDeployAsync(zipFile, length).then(stopAsync()).then(startAsync());
}
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
try {
return kuduClient.zipDeployAsync(zipFile);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void deploy(DeployType type, File file) {
deployAsync(type, file).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file) {
return deployAsync(type, file, new DeployOptions());
}
@Override
public void deploy(DeployType type, File file, DeployOptions deployOptions) {
deployAsync(type, file, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.deployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void deploy(DeployType type, InputStream file, long length) {
deployAsync(type, file, length).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length) {
return deployAsync(type, file, length, new DeployOptions());
}
@Override
public void deploy(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
deployAsync(type, file, length, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
return kuduClient.deployAsync(type, file, length,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
}
@Override
public KuduDeploymentResult pushDeploy(DeployType type, File file, DeployOptions deployOptions) {
return pushDeployAsync(type, file, deployOptions).block();
}
@Override
public Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.pushDeployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment(),
deployOptions.trackDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public CsmDeploymentStatus getDeploymentStatus(String deploymentId) {
return getDeploymentStatusAsync(deploymentId).block();
}
@Override
} |
I think the client API should already handle the error case (actually the underlying proxy method). | public Mono<CsmDeploymentStatus> getDeploymentStatusAsync(String deploymentId) {
SerializerAdapter serializerAdapter = SerializerFactory.createDefaultManagementSerializerAdapter();
return this.manager().serviceClient().getWebApps()
.getProductionSiteDeploymentStatusWithResponseAsync(this.resourceGroupName(), this.name(), deploymentId)
.flatMap(fluxResponse -> {
HttpResponse response = new HttpFluxBBResponse(fluxResponse);
if (fluxResponse.getStatusCode() / 100 != 2) {
return Mono.error(new ManagementException("Service responds with a non-20x response.", response));
}
return response.getBodyAsString()
.flatMap(bodyString -> {
CsmDeploymentStatus status;
try {
status = serializerAdapter.deserialize(bodyString, CsmDeploymentStatus.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.error(new ManagementException("Deserialize failed for response body.", response));
}
return Mono.justOrEmpty(status);
});
});
} | } | public Mono<CsmDeploymentStatus> getDeploymentStatusAsync(String deploymentId) {
SerializerAdapter serializerAdapter = SerializerFactory.createDefaultManagementSerializerAdapter();
return this.manager().serviceClient().getWebApps()
.getSlotSiteDeploymentStatusSlotWithResponseAsync(this.resourceGroupName(), this.parent().name(), this.name(), deploymentId)
.flatMap(fluxResponse -> {
HttpResponse response = new HttpFluxBBResponse(fluxResponse);
return response.getBodyAsString()
.flatMap(bodyString -> {
CsmDeploymentStatus status;
try {
status = serializerAdapter.deserialize(bodyString, CsmDeploymentStatus.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.error(new ManagementException("Deserialize failed for response body.", response));
}
return Mono.justOrEmpty(status);
});
});
} | class DeploymentSlotImpl
extends DeploymentSlotBaseImpl<
DeploymentSlot,
DeploymentSlotImpl,
WebAppImpl,
DeploymentSlot.DefinitionStages.WithCreate,
DeploymentSlotBase.Update<DeploymentSlot>>
implements DeploymentSlot, DeploymentSlot.Definition {
private static final Duration MAX_DEPLOYMENT_STATUS_TIMEOUT = Duration.ofMinutes(5);
DeploymentSlotImpl(
String name,
SiteInner innerObject,
SiteConfigResourceInner siteConfig,
SiteLogsConfigInner logConfig,
WebAppImpl parent) {
super(name, innerObject, siteConfig, logConfig, parent);
}
@Override
public DeploymentSlotImpl withConfigurationFromParent() {
return withConfigurationFromWebApp(this.parent());
}
@Override
public DeploymentSlotImpl withConfigurationFromWebApp(WebApp webApp) {
this.siteConfig = ((WebAppBaseImpl) webApp).siteConfig;
configurationSource = webApp;
return this;
}
@Override
public Mono<Void> warDeployAsync(File warFile) {
return warDeployAsync(warFile, null);
}
@Override
public void warDeploy(File warFile) {
warDeployAsync(warFile).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length) {
return warDeployAsync(warFile, length, null);
}
@Override
public void warDeploy(InputStream warFile, long length) {
warDeployAsync(warFile, length).block();
}
@Override
public Mono<Void> warDeployAsync(File warFile, String appName) {
try {
return kuduClient.warDeployAsync(warFile, appName);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void warDeploy(File warFile, String appName) {
warDeployAsync(warFile, appName).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length, String appName) {
return kuduClient.warDeployAsync(warFile, length, appName);
}
@Override
public void warDeploy(InputStream warFile, long length, String appName) {
warDeployAsync(warFile, length, appName).block();
}
@Override
public void zipDeploy(File zipFile) {
zipDeployAsync(zipFile).block();
}
@Override
public void zipDeploy(InputStream zipFile, long length) {
zipDeployAsync(zipFile, length).block();
}
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile, long length) {
return kuduClient.zipDeployAsync(zipFile, length).then(stopAsync()).then(startAsync());
}
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
try {
return kuduClient.zipDeployAsync(zipFile);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void deploy(DeployType type, File file) {
deployAsync(type, file).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file) {
return deployAsync(type, file, new DeployOptions());
}
@Override
public void deploy(DeployType type, File file, DeployOptions deployOptions) {
deployAsync(type, file, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.deployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void deploy(DeployType type, InputStream file, long length) {
deployAsync(type, file, length).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length) {
return deployAsync(type, file, length, new DeployOptions());
}
@Override
public void deploy(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
deployAsync(type, file, length, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
return kuduClient.deployAsync(type, file, length,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
}
@Override
public KuduDeploymentResult pushDeploy(DeployType type, File file, DeployOptions deployOptions) {
return pushDeployAsync(type, file, deployOptions).block();
}
@Override
public Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.pushDeployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment(),
deployOptions.trackDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public CsmDeploymentStatus getDeploymentStatus(String deploymentId) {
return getDeploymentStatusAsync(deploymentId).block();
}
@Override
} | class DeploymentSlotImpl
extends DeploymentSlotBaseImpl<
DeploymentSlot,
DeploymentSlotImpl,
WebAppImpl,
DeploymentSlot.DefinitionStages.WithCreate,
DeploymentSlotBase.Update<DeploymentSlot>>
implements DeploymentSlot, DeploymentSlot.Definition {
DeploymentSlotImpl(
String name,
SiteInner innerObject,
SiteConfigResourceInner siteConfig,
SiteLogsConfigInner logConfig,
WebAppImpl parent) {
super(name, innerObject, siteConfig, logConfig, parent);
}
@Override
public DeploymentSlotImpl withConfigurationFromParent() {
return withConfigurationFromWebApp(this.parent());
}
@Override
public DeploymentSlotImpl withConfigurationFromWebApp(WebApp webApp) {
this.siteConfig = ((WebAppBaseImpl) webApp).siteConfig;
configurationSource = webApp;
return this;
}
@Override
public Mono<Void> warDeployAsync(File warFile) {
return warDeployAsync(warFile, null);
}
@Override
public void warDeploy(File warFile) {
warDeployAsync(warFile).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length) {
return warDeployAsync(warFile, length, null);
}
@Override
public void warDeploy(InputStream warFile, long length) {
warDeployAsync(warFile, length).block();
}
@Override
public Mono<Void> warDeployAsync(File warFile, String appName) {
try {
return kuduClient.warDeployAsync(warFile, appName);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void warDeploy(File warFile, String appName) {
warDeployAsync(warFile, appName).block();
}
@Override
public Mono<Void> warDeployAsync(InputStream warFile, long length, String appName) {
return kuduClient.warDeployAsync(warFile, length, appName);
}
@Override
public void warDeploy(InputStream warFile, long length, String appName) {
warDeployAsync(warFile, length, appName).block();
}
@Override
public void zipDeploy(File zipFile) {
zipDeployAsync(zipFile).block();
}
@Override
public void zipDeploy(InputStream zipFile, long length) {
zipDeployAsync(zipFile, length).block();
}
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile, long length) {
return kuduClient.zipDeployAsync(zipFile, length).then(stopAsync()).then(startAsync());
}
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
try {
return kuduClient.zipDeployAsync(zipFile);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void deploy(DeployType type, File file) {
deployAsync(type, file).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file) {
return deployAsync(type, file, new DeployOptions());
}
@Override
public void deploy(DeployType type, File file, DeployOptions deployOptions) {
deployAsync(type, file, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.deployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void deploy(DeployType type, InputStream file, long length) {
deployAsync(type, file, length).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length) {
return deployAsync(type, file, length, new DeployOptions());
}
@Override
public void deploy(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
deployAsync(type, file, length, deployOptions).block();
}
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
return kuduClient.deployAsync(type, file, length,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment());
}
@Override
public KuduDeploymentResult pushDeploy(DeployType type, File file, DeployOptions deployOptions) {
return pushDeployAsync(type, file, deployOptions).block();
}
@Override
public Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, File file, DeployOptions deployOptions) {
Objects.requireNonNull(type);
Objects.requireNonNull(file);
if (deployOptions == null) {
deployOptions = new DeployOptions();
}
try {
return kuduClient.pushDeployAsync(type, file,
deployOptions.path(), deployOptions.restartSite(), deployOptions.cleanDeployment(),
deployOptions.trackDeployment());
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public CsmDeploymentStatus getDeploymentStatus(String deploymentId) {
return getDeploymentStatusAsync(deploymentId).block();
}
@Override
} |
Since all these are `Boolean` properties, this will throw NPE if any of them is `null`. The constructor does initialize them but the user can still call `setLastWrittenOn(null)` to make this property `null`. So, we should check for null here. | public Boolean isAll() {
return isSetFileAttributes && isSetCreatedOn && isSetLastWrittenOn && isSetChangedOn;
} | return isSetFileAttributes && isSetCreatedOn && isSetLastWrittenOn && isSetChangedOn; | public Boolean isAll() {
return (isSetFileAttributes != null && isSetFileAttributes)
&& (isSetCreatedOn != null && isSetCreatedOn)
&& (isSetLastWrittenOn != null && isSetLastWrittenOn)
&& (isSetChangedOn != null && isSetChangedOn);
} | class CopyableFileSmbPropertiesList {
private Boolean isSetFileAttributes;
private Boolean isSetCreatedOn;
private Boolean isSetLastWrittenOn;
private Boolean isSetChangedOn;
/**
* Creates an instance of information about the file smb properties.
*/
public CopyableFileSmbPropertiesList() {
isSetFileAttributes = false;
isSetCreatedOn = false;
isSetLastWrittenOn = false;
isSetChangedOn = false;
}
/***
* @return a flag indicating whether file attributes should be copied from source file.
*/
public Boolean isFileAttributes() {
return isSetFileAttributes;
}
/**
* @param fileAttributes Flag indicating whether to copy file attributes from source file
* @return the updated {@link CopyableFileSmbPropertiesList}
*/
public CopyableFileSmbPropertiesList setFileAttributes(Boolean fileAttributes) {
isSetFileAttributes = fileAttributes;
return this;
}
/**
* @return a flag indicating whether created on timestamp should be copied from source file.
*/
public Boolean isCreatedOn() {
return isSetCreatedOn;
}
/**
* @param createdOn Flag indicating whether to copy created on timestamp from source file
* @return the updated {@link CopyableFileSmbPropertiesList}
*/
public CopyableFileSmbPropertiesList setCreatedOn(Boolean createdOn) {
isSetCreatedOn = createdOn;
return this;
}
/**
* @return a flag indicating whether last written on timestamp should be copied from source file.
*/
public Boolean isLastWrittenOn() {
return isSetLastWrittenOn;
}
/**
* @param lastWrittenOn Flag indicating whether to copy last written on timestamp from source file
* @return the updated {@link CopyableFileSmbPropertiesList}
*/
public CopyableFileSmbPropertiesList setLastWrittenOn(Boolean lastWrittenOn) {
isSetLastWrittenOn = lastWrittenOn;
return this;
}
/**
* @return a flag indicating whether changed on timestamp should be copied from source file.
*/
public Boolean isChangedOn() {
return isSetChangedOn;
}
/**
* @param changedOn Flag indicating whether to copy changed on timestamp from source file
* @return the updated {@link CopyableFileSmbPropertiesList}
*/
public CopyableFileSmbPropertiesList setChangedOn(Boolean changedOn) {
isSetChangedOn = changedOn;
return this;
}
/**
* @return whether all properties should be copied from the source file.
*/
/**
 * Checks whether none of the SMB properties are flagged for copying from the source file.
 * <p>
 * Null-safe: although the constructor initializes every flag to {@code false}, a caller can
 * set a flag to {@code null} through its setter; auto-unboxing such a null {@link Boolean}
 * would throw a NullPointerException, so {@code null} is treated the same as {@code false}.
 *
 * @return whether no properties should be copied from the source file.
 */
public Boolean isNone() {
return !Boolean.TRUE.equals(isSetFileAttributes)
&& !Boolean.TRUE.equals(isSetCreatedOn)
&& !Boolean.TRUE.equals(isSetLastWrittenOn)
&& !Boolean.TRUE.equals(isSetChangedOn);
}
/**
 * Converts the flag set into a list of the properties that are enabled.
 * <p>
 * Null-safe: a flag explicitly set to {@code null} via its setter is treated as
 * {@code false} rather than throwing on auto-unboxing. Each enabled flag contributes
 * its property exactly once (the previous implementation checked
 * {@code isSetFileAttributes} twice and so added {@code FILE_ATTRIBUTES} twice).
 *
 * @return a list containing one {@link CopyableFileSmbProperties} entry per flag set to true
 */
public List<CopyableFileSmbProperties> toList() {
List<CopyableFileSmbProperties> details = new ArrayList<>();
if (Boolean.TRUE.equals(this.isSetFileAttributes)) {
details.add(CopyableFileSmbProperties.FILE_ATTRIBUTES);
}
if (Boolean.TRUE.equals(this.isSetCreatedOn)) {
details.add(CopyableFileSmbProperties.CREATED_ON);
}
if (Boolean.TRUE.equals(this.isSetLastWrittenOn)) {
details.add(CopyableFileSmbProperties.LAST_WRITTEN_ON);
}
if (Boolean.TRUE.equals(this.isSetChangedOn)) {
details.add(CopyableFileSmbProperties.CHANGED_ON);
}
return details;
}
} | class CopyableFileSmbPropertiesList {
private Boolean isSetFileAttributes;
private Boolean isSetCreatedOn;
private Boolean isSetLastWrittenOn;
private Boolean isSetChangedOn;
/**
* Creates an instance of information about the file smb properties.
*/
public CopyableFileSmbPropertiesList() {
isSetFileAttributes = false;
isSetCreatedOn = false;
isSetLastWrittenOn = false;
isSetChangedOn = false;
}
/**
* Specifies whether file attributes should be copied from source file.
*
* @return {@code true} if file attributes should be copied.
*/
public Boolean isFileAttributes() {
return isSetFileAttributes;
}
/**
* Specify whether file attributes should be copied from source file.
*
* @param fileAttributes Flag indicating whether to copy file attributes from source file
* @return the updated {@link CopyableFileSmbPropertiesList}
*/
public CopyableFileSmbPropertiesList setFileAttributes(Boolean fileAttributes) {
isSetFileAttributes = fileAttributes;
return this;
}
/**
* Specifies whether created on timestamp should be copied from source file.
*
* @return {@code true} if created on timestamp should be copied.
*/
public Boolean isCreatedOn() {
return isSetCreatedOn;
}
/**
* Specify whether created on timestamp should be copied from source file.
*
* @param createdOn Flag indicating whether to copy created on timestamp from source file
* @return the updated {@link CopyableFileSmbPropertiesList}
*/
public CopyableFileSmbPropertiesList setCreatedOn(Boolean createdOn) {
isSetCreatedOn = createdOn;
return this;
}
/**
* Specifies whether last written on timestamp should be copied from source file.
*
* @return {@code true} if last written on timestamp should be copied.
*/
public Boolean isLastWrittenOn() {
return isSetLastWrittenOn;
}
/**
* Specify whether last written on timestamp should be copied from source file.
*
* @param lastWrittenOn Flag indicating whether to copy last written on timestamp from source file
* @return the updated {@link CopyableFileSmbPropertiesList}
*/
public CopyableFileSmbPropertiesList setLastWrittenOn(Boolean lastWrittenOn) {
isSetLastWrittenOn = lastWrittenOn;
return this;
}
/**
* Specifies whether changed on timestamp should be copied from source file.
*
* @return {@code true} if changed on timestamp should be copied.
*/
public Boolean isChangedOn() {
return isSetChangedOn;
}
/**
* Specify whether changed on timestamp should be copied from source file.
*
* @param changedOn Flag indicating whether to copy changed on timestamp from source file
* @return the updated {@link CopyableFileSmbPropertiesList}
*/
public CopyableFileSmbPropertiesList setChangedOn(Boolean changedOn) {
isSetChangedOn = changedOn;
return this;
}
/**
* Specifies whether all properties should be copied from source file.
*
* @return whether all properties should be copied from the source file.
*/
/**
* Specifies whether no properties should be copied from source file.
*
* @return whether no properties should be copied from the source file.
*/
public Boolean isNone() {
return (isSetFileAttributes == null || !isSetFileAttributes)
&& (isSetCreatedOn == null || !isSetCreatedOn)
&& (isSetLastWrittenOn == null || !isSetLastWrittenOn)
&& (isSetChangedOn == null || !isSetChangedOn);
}
/**
 * Gets list of properties that are set to true.
 * <p>
 * Null-safe: a flag explicitly set to {@code null} via its setter is treated as
 * {@code false} rather than throwing on auto-unboxing. Each enabled flag contributes
 * its property exactly once (the previous implementation checked
 * {@code isSetFileAttributes} twice and so added {@code FILE_ATTRIBUTES} twice).
 *
 * @return a list of the flags set to true.
 */
public List<CopyableFileSmbProperties> toList() {
List<CopyableFileSmbProperties> details = new ArrayList<>();
if (Boolean.TRUE.equals(this.isSetFileAttributes)) {
details.add(CopyableFileSmbProperties.FILE_ATTRIBUTES);
}
if (Boolean.TRUE.equals(this.isSetCreatedOn)) {
details.add(CopyableFileSmbProperties.CREATED_ON);
}
if (Boolean.TRUE.equals(this.isSetLastWrittenOn)) {
details.add(CopyableFileSmbProperties.LAST_WRITTEN_ON);
}
if (Boolean.TRUE.equals(this.isSetChangedOn)) {
details.add(CopyableFileSmbProperties.CHANGED_ON);
}
return details;
}
} |
Include a message in your error. | public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
String partitionId = partitionOwnership.getPartitionId();
String key = keyBuilder(prefixBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup()), partitionId);
try (Jedis jedis = jedisPool.getResource()) {
List<String> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
String currentPartitionOwnership = keyInformation.get(0);
if (currentPartitionOwnership == null) {
jedis.hset(key, PARTITION_OWNERSHIP, new String(DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership), StandardCharsets.UTF_8));
} else {
jedis.watch(key);
Transaction transaction = jedis.multi();
transaction.hset(key, PARTITION_OWNERSHIP, new String(DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership), StandardCharsets.UTF_8));
List<Object> executionResponse = transaction.exec();
if (executionResponse == null) {
sink.error(new RuntimeException());
}
}
jedisPool.returnResource(jedis);
}
sink.next(partitionOwnership);
}));
} | sink.error(new RuntimeException()); | public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
String partitionId = partitionOwnership.getPartitionId();
byte[] key = keyBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup(), partitionId);
try (Jedis jedis = jedisPool.getResource()) {
List<byte[]> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
byte[] currentPartitionOwnership = keyInformation.get(0);
if (currentPartitionOwnership == null) {
jedis.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
} else {
jedis.watch(key);
Transaction transaction = jedis.multi();
transaction.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
List<Object> executionResponse = transaction.exec();
if (executionResponse == null) {
sink.error(new RuntimeException("Ownership records were changed by another client"));
}
}
jedisPool.returnResource(jedis);
}
sink.next(partitionOwnership);
}));
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
// Redis set key naming the per-partition member keys for this namespace/hub/group.
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
// Each member of the set is the key of one partition's hash entry.
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
// NOTE(review): try-with-resources already closes the Jedis instance on exit;
// calling returnResource here as well looks redundant — confirm against the
// Jedis version in use before removing.
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
// hmget for a single field returns a one-element list (the element is null if
// the CHECKPOINT field has never been written).
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
// The partition key exists but no checkpoint has been persisted yet; skip it.
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/** This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        // An empty member set (no partitions registered) simply yields an empty Flux.
        List<PartitionOwnership> storedOwnerships = new ArrayList<>();
        for (String member : jedis.smembers(prefix)) {
            List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
            // HMGET returns a single-element list; a null element means no ownership
            // record has been written for this partition yet.
            if (partitionOwnershipJsonList.isEmpty() || partitionOwnershipJsonList.get(0) == null) {
                LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
                continue;
            }
            storedOwnerships.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                partitionOwnershipJsonList.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(PartitionOwnership.class)));
        }
        // Fix: dropped jedisPool.returnResource(jedis) — try-with-resources close()
        // already returns the connection; a second return can corrupt the pool.
        return Flux.fromIterable(storedOwnerships);
    }
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        // Fix: the previous exists()-guarded if/else performed the identical HSET in
        // both branches and was a racy read-modify-write. SADD is idempotent, so the
        // key can be registered unconditionally.
        jedis.sadd(prefix, key);
        jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        // No explicit returnResource: try-with-resources close() returns the
        // connection to the pool; returning it twice can corrupt the pool.
    }
    return Mono.empty();
}
/**
 * Builds the Redis key prefix ("namespace/eventHub/consumerGroup") shared by every
 * partition of one consumer group.
 */
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
/**
 * Appends the partition id to a consumer-group prefix, producing the per-partition
 * Redis key ("prefix/partitionId").
 */
static String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
/**
 * A checkpoint is valid when it is non-null and carries at least one of offset or
 * sequence number. Fix: returns primitive {@code boolean} instead of boxed
 * {@code Boolean} — callers only use it in conditions, so boxing was pure overhead.
 */
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    return checkpoint != null
        && (checkpoint.getOffset() != null || checkpoint.getSequenceNumber() != null);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
// Logger for warnings and verbose diagnostics emitted by this checkpoint store.
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
// Shared JSON serializer used to round-trip Checkpoint/PartitionOwnership records.
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Redis hash field names, pre-encoded as UTF-8 bytes for the binary Jedis APIs.
// Package-private so tests can reference the same constants.
static final byte[] CHECKPOINT = "checkpoint".getBytes(StandardCharsets.UTF_8);
static final byte[] PARTITION_OWNERSHIP = "partitionOwnership".getBytes(StandardCharsets.UTF_8);
// Connection pool; each operation borrows a Jedis instance via try-with-resources.
private final JedisPool jedisPool;
// Package-private constructor. NOTE(review): the construction path (builder/factory)
// is not visible in this chunk — confirm how instances are created.
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        // Members of the prefix set are the per-partition keys; an empty set simply
        // yields an empty Flux.
        List<Checkpoint> storedCheckpoints = new ArrayList<>();
        for (byte[] member : jedis.smembers(prefix)) {
            List<byte[]> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
            // HMGET returns a single-element list; a null element means the hash field
            // has not been written yet.
            if (checkpointJsonList.isEmpty() || checkpointJsonList.get(0) == null) {
                LOGGER.verbose("No checkpoint persists yet.");
                continue;
            }
            storedCheckpoints.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                checkpointJsonList.get(0), TypeReference.createInstance(Checkpoint.class)));
        }
        // Fix: removed the explicit jedisPool.returnResource(jedis). try-with-resources
        // already returns the connection via Jedis.close(); returning the same instance
        // twice can corrupt the pool's internal state.
        return Flux.fromIterable(storedCheckpoints);
    }
}
/**
* This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        // An empty member set (no partitions registered) simply yields an empty Flux.
        List<PartitionOwnership> storedOwnerships = new ArrayList<>();
        for (byte[] member : jedis.smembers(prefix)) {
            List<byte[]> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
            // HMGET returns a single-element list; a null element means no ownership
            // record has been written for this partition yet.
            if (partitionOwnershipJsonList.isEmpty() || partitionOwnershipJsonList.get(0) == null) {
                LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
                continue;
            }
            storedOwnerships.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                partitionOwnershipJsonList.get(0), TypeReference.createInstance(PartitionOwnership.class)));
        }
        // Fix: dropped jedisPool.returnResource(jedis) — try-with-resources close()
        // already returns the connection; a second return can corrupt the pool.
        return Flux.fromIterable(storedOwnerships);
    }
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    // Validate eagerly (at assembly time) so misuse fails fast, matching the
    // original behavior; only the Redis round-trip is deferred to subscription.
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    return Mono.fromRunnable(() -> {
        byte[] prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
        byte[] key = keyBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup(), checkpoint.getPartitionId());
        try (Jedis jedis = jedisPool.getResource()) {
            // Fix: the previous exists()-guarded if/else performed the identical HSET
            // in both branches and was a racy read-modify-write. SADD is idempotent,
            // so the key can be registered unconditionally.
            jedis.sadd(prefix, key);
            jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
            // No explicit returnResource: try-with-resources close() returns the
            // connection to the pool; returning it twice can corrupt the pool.
        }
    });
}
/**
 * Builds the UTF-8-encoded Redis key prefix ("namespace/eventHub/consumerGroup")
 * shared by every partition of one consumer group.
 */
static byte[] prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
    return prefix.getBytes(StandardCharsets.UTF_8);
}
/**
 * Builds the UTF-8-encoded per-partition Redis key
 * ("namespace/eventHub/consumerGroup/partitionId").
 */
static byte[] keyBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup, String partitionId) {
    String key = String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup, partitionId);
    return key.getBytes(StandardCharsets.UTF_8);
}
/**
 * A checkpoint is valid when it is non-null and carries at least one of offset or
 * sequence number. Fix: returns primitive {@code boolean} instead of boxed
 * {@code Boolean} — callers only use it in conditions, so boxing was pure overhead.
 */
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    return checkpoint != null
        && (checkpoint.getOffset() != null || checkpoint.getSequenceNumber() != null);
}
} |
I've noticed this pattern of serializing to bytes, then turning it back into a string. There is an overload that takes bytes[]. Is there a reason to use string? using string forces us to go over the data twice.... object -> bytes -> reads bytes back into string. | public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
} else {
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
} | jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8)); | public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
return Mono.fromRunnable(() -> {
byte[] prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
byte[] key = keyBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup(), checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
} else {
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
}
jedisPool.returnResource(jedis);
}
});
} | class JedisRedisCheckpointStore implements CheckpointStore {
// Logger for warnings and verbose diagnostics emitted by this checkpoint store.
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
// Shared JSON serializer used to round-trip Checkpoint/PartitionOwnership records.
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Redis hash field names. Package-private so tests can reference the same constants.
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
// Connection pool; each operation borrows a Jedis instance via try-with-resources.
private final JedisPool jedisPool;
// Package-private constructor. NOTE(review): the construction path (builder/factory)
// is not visible in this chunk — confirm how instances are created.
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
        String partitionId = partitionOwnership.getPartitionId();
        String key = keyBuilder(prefixBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup()), partitionId);
        try (Jedis jedis = jedisPool.getResource()) {
            List<String> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
            String currentPartitionOwnership = keyInformation.get(0);
            if (currentPartitionOwnership == null) {
                // First claim for this partition: write the record directly.
                jedis.hset(key, PARTITION_OWNERSHIP, new String(DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership), StandardCharsets.UTF_8));
            } else {
                // Optimistic lock: EXEC returns null if the watched key changed.
                // NOTE(review): WATCH is issued after the HMGET above, so the read
                // itself is not covered by the lock — confirm this is intended.
                jedis.watch(key);
                Transaction transaction = jedis.multi();
                transaction.hset(key, PARTITION_OWNERSHIP, new String(DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership), StandardCharsets.UTF_8));
                if (transaction.exec() == null) {
                    // Fix: the error now carries a message, and we return so that
                    // sink.next is not invoked after sink.error (a Reactor handle()
                    // must emit at most one signal per element).
                    sink.error(new RuntimeException("Ownership of partition " + partitionId + " was changed by another client."));
                    return;
                }
            }
            // Fix: no explicit returnResource — try-with-resources close() already
            // returns the connection; a second return can corrupt the pool.
        }
        sink.next(partitionOwnership);
    }));
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        // Members of the prefix set are the per-partition keys; an empty set simply
        // yields an empty Flux.
        List<Checkpoint> storedCheckpoints = new ArrayList<>();
        for (String member : jedis.smembers(prefix)) {
            List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
            // HMGET returns a single-element list; a null element means the hash field
            // has not been written yet.
            if (checkpointJsonList.isEmpty() || checkpointJsonList.get(0) == null) {
                LOGGER.verbose("No checkpoint persists yet.");
                continue;
            }
            storedCheckpoints.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                checkpointJsonList.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(Checkpoint.class)));
        }
        // Fix: removed the explicit jedisPool.returnResource(jedis). try-with-resources
        // already returns the connection via Jedis.close(); returning the same instance
        // twice can corrupt the pool's internal state.
        return Flux.fromIterable(storedCheckpoints);
    }
}
/** This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        // An empty member set (no partitions registered) simply yields an empty Flux.
        List<PartitionOwnership> storedOwnerships = new ArrayList<>();
        for (String member : jedis.smembers(prefix)) {
            List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
            // HMGET returns a single-element list; a null element means no ownership
            // record has been written for this partition yet.
            if (partitionOwnershipJsonList.isEmpty() || partitionOwnershipJsonList.get(0) == null) {
                LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
                continue;
            }
            storedOwnerships.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                partitionOwnershipJsonList.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(PartitionOwnership.class)));
        }
        // Fix: dropped jedisPool.returnResource(jedis) — try-with-resources close()
        // already returns the connection; a second return can corrupt the pool.
        return Flux.fromIterable(storedOwnerships);
    }
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
/**
 * Builds the Redis key prefix ("namespace/eventHub/consumerGroup") shared by every
 * partition of one consumer group.
 */
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
/**
 * Appends the partition id to a consumer-group prefix, producing the per-partition
 * Redis key ("prefix/partitionId").
 */
static String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
/**
 * A checkpoint is valid when it is non-null and carries at least one of offset or
 * sequence number. Fix: returns primitive {@code boolean} instead of boxed
 * {@code Boolean} — callers only use it in conditions, so boxing was pure overhead.
 */
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    return checkpoint != null
        && (checkpoint.getOffset() != null || checkpoint.getSequenceNumber() != null);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
// Logger for warnings and verbose diagnostics emitted by this checkpoint store.
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
// Shared JSON serializer used to round-trip Checkpoint/PartitionOwnership records.
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Redis hash field names, pre-encoded as UTF-8 bytes for the binary Jedis APIs.
// Package-private so tests can reference the same constants.
static final byte[] CHECKPOINT = "checkpoint".getBytes(StandardCharsets.UTF_8);
static final byte[] PARTITION_OWNERSHIP = "partitionOwnership".getBytes(StandardCharsets.UTF_8);
// Connection pool; each operation borrows a Jedis instance via try-with-resources.
private final JedisPool jedisPool;
// Package-private constructor. NOTE(review): the construction path (builder/factory)
// is not visible in this chunk — confirm how instances are created.
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
        String partitionId = partitionOwnership.getPartitionId();
        byte[] key = keyBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup(), partitionId);
        try (Jedis jedis = jedisPool.getResource()) {
            List<byte[]> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
            byte[] currentPartitionOwnership = keyInformation.get(0);
            if (currentPartitionOwnership == null) {
                // First claim for this partition: write the record directly.
                jedis.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
            } else {
                // Optimistic lock: EXEC returns null if the watched key changed.
                // NOTE(review): WATCH is issued after the HMGET above, so the read
                // itself is not covered by the lock — confirm this is intended.
                jedis.watch(key);
                Transaction transaction = jedis.multi();
                transaction.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
                if (transaction.exec() == null) {
                    sink.error(new RuntimeException("Ownership records were changed by another client"));
                    // Fix: return so sink.next is not invoked after sink.error (a
                    // Reactor handle() must emit at most one signal per element).
                    return;
                }
            }
            // Fix: no explicit returnResource — try-with-resources close() already
            // returns the connection; a second return can corrupt the pool.
        }
        sink.next(partitionOwnership);
    }));
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        // Members of the prefix set are the per-partition keys; an empty set simply
        // yields an empty Flux.
        List<Checkpoint> storedCheckpoints = new ArrayList<>();
        for (byte[] member : jedis.smembers(prefix)) {
            List<byte[]> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
            // HMGET returns a single-element list; a null element means the hash field
            // has not been written yet.
            if (checkpointJsonList.isEmpty() || checkpointJsonList.get(0) == null) {
                LOGGER.verbose("No checkpoint persists yet.");
                continue;
            }
            storedCheckpoints.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                checkpointJsonList.get(0), TypeReference.createInstance(Checkpoint.class)));
        }
        // Fix: removed the explicit jedisPool.returnResource(jedis). try-with-resources
        // already returns the connection via Jedis.close(); returning the same instance
        // twice can corrupt the pool's internal state.
        return Flux.fromIterable(storedCheckpoints);
    }
}
/**
* This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        // An empty member set (no partitions registered) simply yields an empty Flux.
        List<PartitionOwnership> storedOwnerships = new ArrayList<>();
        for (byte[] member : jedis.smembers(prefix)) {
            List<byte[]> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
            // HMGET returns a single-element list; a null element means no ownership
            // record has been written for this partition yet.
            if (partitionOwnershipJsonList.isEmpty() || partitionOwnershipJsonList.get(0) == null) {
                LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
                continue;
            }
            storedOwnerships.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                partitionOwnershipJsonList.get(0), TypeReference.createInstance(PartitionOwnership.class)));
        }
        // Fix: dropped jedisPool.returnResource(jedis) — try-with-resources close()
        // already returns the connection; a second return can corrupt the pool.
        return Flux.fromIterable(storedOwnerships);
    }
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
/**
 * Builds the UTF-8-encoded Redis key prefix ("namespace/eventHub/consumerGroup")
 * shared by every partition of one consumer group.
 */
static byte[] prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
    return prefix.getBytes(StandardCharsets.UTF_8);
}
/**
 * Builds the UTF-8-encoded per-partition Redis key
 * ("namespace/eventHub/consumerGroup/partitionId").
 */
static byte[] keyBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup, String partitionId) {
    String key = String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup, partitionId);
    return key.getBytes(StandardCharsets.UTF_8);
}
/**
 * A checkpoint is valid when it is non-null and carries at least one of offset or
 * sequence number. Fix: returns primitive {@code boolean} instead of boxed
 * {@code Boolean} — callers only use it in conditions, so boxing was pure overhead.
 */
private static boolean isCheckpointValid(Checkpoint checkpoint) {
    return checkpoint != null
        && (checkpoint.getOffset() != null || checkpoint.getSequenceNumber() != null);
}
} |
nit: between variable declarations into flow control like this and (ie. between line 50 and 51, 59 and 60) add a new line. This is hard for me to read because it looks like a giant block of prose. | public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
String partitionId = partitionOwnership.getPartitionId();
String key = keyBuilder(prefixBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup()), partitionId);
try (Jedis jedis = jedisPool.getResource()) {
List<String> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
String currentPartitionOwnership = keyInformation.get(0);
if (currentPartitionOwnership == null) {
jedis.hset(key, PARTITION_OWNERSHIP, new String(DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership), StandardCharsets.UTF_8));
} else {
jedis.watch(key);
Transaction transaction = jedis.multi();
transaction.hset(key, PARTITION_OWNERSHIP, new String(DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership), StandardCharsets.UTF_8));
List<Object> executionResponse = transaction.exec();
if (executionResponse == null) {
sink.error(new RuntimeException());
}
}
jedisPool.returnResource(jedis);
}
sink.next(partitionOwnership);
}));
} | try (Jedis jedis = jedisPool.getResource()) { | public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
String partitionId = partitionOwnership.getPartitionId();
byte[] key = keyBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup(), partitionId);
try (Jedis jedis = jedisPool.getResource()) {
List<byte[]> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
byte[] currentPartitionOwnership = keyInformation.get(0);
if (currentPartitionOwnership == null) {
jedis.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
} else {
jedis.watch(key);
Transaction transaction = jedis.multi();
transaction.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
List<Object> executionResponse = transaction.exec();
if (executionResponse == null) {
sink.error(new RuntimeException("Ownership records were changed by another client"));
}
}
jedisPool.returnResource(jedis);
}
sink.next(partitionOwnership);
}));
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/** This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
} else {
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final byte[] CHECKPOINT = "checkpoint".getBytes(StandardCharsets.UTF_8);
static final byte[] PARTITION_OWNERSHIP = "partitionOwnership".getBytes(StandardCharsets.UTF_8);
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<byte[]> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (byte[] member : members) {
List<byte[]> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
byte[] checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson, TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<byte[]> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (byte[] member : members) {
List<byte[]> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
byte[] partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson, TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
return Mono.fromRunnable(() -> {
byte[] prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
byte[] key = keyBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup(), checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
} else {
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
}
jedisPool.returnResource(jedis);
}
});
}
static byte[] prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return (fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup).getBytes(StandardCharsets.UTF_8);
}
static byte[] keyBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup, String partitionId) {
return (fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup + "/" + partitionId).getBytes(StandardCharsets.UTF_8);
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
Added in new lines wherever I noticed this. I'll keep it in mind going forward too! | public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
String partitionId = partitionOwnership.getPartitionId();
String key = keyBuilder(prefixBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup()), partitionId);
try (Jedis jedis = jedisPool.getResource()) {
List<String> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
String currentPartitionOwnership = keyInformation.get(0);
if (currentPartitionOwnership == null) {
jedis.hset(key, PARTITION_OWNERSHIP, new String(DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership), StandardCharsets.UTF_8));
} else {
jedis.watch(key);
Transaction transaction = jedis.multi();
transaction.hset(key, PARTITION_OWNERSHIP, new String(DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership), StandardCharsets.UTF_8));
List<Object> executionResponse = transaction.exec();
if (executionResponse == null) {
sink.error(new RuntimeException());
}
}
jedisPool.returnResource(jedis);
}
sink.next(partitionOwnership);
}));
} | try (Jedis jedis = jedisPool.getResource()) { | public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
String partitionId = partitionOwnership.getPartitionId();
byte[] key = keyBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup(), partitionId);
try (Jedis jedis = jedisPool.getResource()) {
List<byte[]> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
byte[] currentPartitionOwnership = keyInformation.get(0);
if (currentPartitionOwnership == null) {
jedis.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
} else {
jedis.watch(key);
Transaction transaction = jedis.multi();
transaction.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
List<Object> executionResponse = transaction.exec();
if (executionResponse == null) {
sink.error(new RuntimeException("Ownership records were changed by another client"));
}
}
jedisPool.returnResource(jedis);
}
sink.next(partitionOwnership);
}));
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*of
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/** This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
} else {
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final byte[] CHECKPOINT = "checkpoint".getBytes(StandardCharsets.UTF_8);
static final byte[] PARTITION_OWNERSHIP = "partitionOwnership".getBytes(StandardCharsets.UTF_8);
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<byte[]> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (byte[] member : members) {
List<byte[]> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
byte[] checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson, TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<byte[]> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (byte[] member : members) {
List<byte[]> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
byte[] partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson, TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
return Mono.fromRunnable(() -> {
byte[] prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
byte[] key = keyBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup(), checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
} else {
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
}
jedisPool.returnResource(jedis);
}
});
}
static byte[] prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return (fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup).getBytes(StandardCharsets.UTF_8);
}
static byte[] keyBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup, String partitionId) {
return (fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup + "/" + partitionId).getBytes(StandardCharsets.UTF_8);
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
I went ahead and changed the implementation to only go from object <-> bytes. | public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
} else {
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
} | jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8)); | public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
return Mono.fromRunnable(() -> {
byte[] prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
byte[] key = keyBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup(), checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
} else {
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
}
jedisPool.returnResource(jedis);
}
});
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
String partitionId = partitionOwnership.getPartitionId();
String key = keyBuilder(prefixBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup()), partitionId);
try (Jedis jedis = jedisPool.getResource()) {
List<String> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
String currentPartitionOwnership = keyInformation.get(0);
if (currentPartitionOwnership == null) {
jedis.hset(key, PARTITION_OWNERSHIP, new String(DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership), StandardCharsets.UTF_8));
} else {
jedis.watch(key);
Transaction transaction = jedis.multi();
transaction.hset(key, PARTITION_OWNERSHIP, new String(DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership), StandardCharsets.UTF_8));
List<Object> executionResponse = transaction.exec();
if (executionResponse == null) {
sink.error(new RuntimeException());
}
}
jedisPool.returnResource(jedis);
}
sink.next(partitionOwnership);
}));
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*of
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/** This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final byte[] CHECKPOINT = "checkpoint".getBytes(StandardCharsets.UTF_8);
static final byte[] PARTITION_OWNERSHIP = "partitionOwnership".getBytes(StandardCharsets.UTF_8);
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
String partitionId = partitionOwnership.getPartitionId();
byte[] key = keyBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup(), partitionId);
try (Jedis jedis = jedisPool.getResource()) {
List<byte[]> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
byte[] currentPartitionOwnership = keyInformation.get(0);
if (currentPartitionOwnership == null) {
jedis.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
} else {
jedis.watch(key);
Transaction transaction = jedis.multi();
transaction.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
List<Object> executionResponse = transaction.exec();
if (executionResponse == null) {
sink.error(new RuntimeException("Ownership records were changed by another client"));
}
}
jedisPool.returnResource(jedis);
}
sink.next(partitionOwnership);
}));
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<byte[]> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (byte[] member : members) {
List<byte[]> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
byte[] checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson, TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<byte[]> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (byte[] member : members) {
List<byte[]> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
byte[] partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson, TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
static byte[] prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return (fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup).getBytes(StandardCharsets.UTF_8);
}
static byte[] keyBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup, String partitionId) {
return (fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup + "/" + partitionId).getBytes(StandardCharsets.UTF_8);
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
I would wrap this in a Mono so it's possible to run this asynchronously rather than synchronously. ```java return Mono.fromRunnable(() -> { byte[] prefix... // rest of your stuff }); ``` | public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
byte[] prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
byte[] key = keyBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup(), checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
} else {
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
} | byte[] prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup()); | public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
return Mono.fromRunnable(() -> {
byte[] prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
byte[] key = keyBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup(), checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
} else {
jedis.hset(key, CHECKPOINT, DEFAULT_SERIALIZER.serializeToBytes(checkpoint));
}
jedisPool.returnResource(jedis);
}
});
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final byte[] CHECKPOINT = "checkpoint".getBytes(StandardCharsets.UTF_8);
static final byte[] PARTITION_OWNERSHIP = "partitionOwnership".getBytes(StandardCharsets.UTF_8);
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
String partitionId = partitionOwnership.getPartitionId();
byte[] key = keyBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup(), partitionId);
try (Jedis jedis = jedisPool.getResource()) {
List<byte[]> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
byte[] currentPartitionOwnership = keyInformation.get(0);
if (currentPartitionOwnership == null) {
jedis.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
} else {
jedis.watch(key);
Transaction transaction = jedis.multi();
transaction.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
List<Object> executionResponse = transaction.exec();
if (executionResponse == null) {
sink.error(new RuntimeException("Ownership records were changed by another client"));
}
}
jedisPool.returnResource(jedis);
}
sink.next(partitionOwnership);
}));
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<byte[]> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (byte[] member : members) {
List<byte[]> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
byte[] checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson, TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<byte[]> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (byte[] member : members) {
List<byte[]> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
byte[] partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson, TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
static byte[] prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return (fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup).getBytes(StandardCharsets.UTF_8);
}
static byte[] keyBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup, String partitionId) {
return (fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup + "/" + partitionId).getBytes(StandardCharsets.UTF_8);
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final byte[] CHECKPOINT = "checkpoint".getBytes(StandardCharsets.UTF_8);
static final byte[] PARTITION_OWNERSHIP = "partitionOwnership".getBytes(StandardCharsets.UTF_8);
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(requestedPartitionOwnerships).handle(((partitionOwnership, sink) -> {
String partitionId = partitionOwnership.getPartitionId();
byte[] key = keyBuilder(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup(), partitionId);
try (Jedis jedis = jedisPool.getResource()) {
List<byte[]> keyInformation = jedis.hmget(key, PARTITION_OWNERSHIP);
byte[] currentPartitionOwnership = keyInformation.get(0);
if (currentPartitionOwnership == null) {
jedis.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
} else {
jedis.watch(key);
Transaction transaction = jedis.multi();
transaction.hset(key, PARTITION_OWNERSHIP, DEFAULT_SERIALIZER.serializeToBytes(partitionOwnership));
List<Object> executionResponse = transaction.exec();
if (executionResponse == null) {
sink.error(new RuntimeException("Ownership records were changed by another client"));
}
}
jedisPool.returnResource(jedis);
}
sink.next(partitionOwnership);
}));
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of Checkpoint objects
*/
@Override
public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<byte[]> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (byte[] member : members) {
List<byte[]> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
byte[] checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson, TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
}
/**
* This method returns the list of ownership records from the underlying data store, and if no ownership records are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Flux of PartitionOwnership objects
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
byte[] prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<byte[]> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (byte[] member : members) {
List<byte[]> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
byte[] partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson, TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Mono that completes if no errors take place
*/
@Override
static byte[] prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return (fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup).getBytes(StandardCharsets.UTF_8);
}
static byte[] keyBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup, String partitionId) {
return (fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup + "/" + partitionId).getBytes(StandardCharsets.UTF_8);
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
what is this if(this.limit == 0) used for? | public void withLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
} | if (this.limit == 0) { | public void withLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
} | class CosmosQuery {
private final Criteria criteria;
private Sort sort = Sort.unsorted();
private Pageable pageable = Pageable.unpaged();
private int limit;
private long offset;
/**
* Initialization
*
* @param criteria object
*/
public CosmosQuery(@NonNull Criteria criteria) {
this.criteria = criteria;
}
/**
* To get Criteria object
*
* @return Criteria
*/
public Criteria getCriteria() {
return criteria;
}
/**
* To get Sort object
*
* @return Sort
*/
public Sort getSort() {
return sort;
}
/**
* To get Pageable object
*
* @return Pageable
*/
public Pageable getPageable() {
return pageable;
}
/**
* To get limit number
*
* @return int limit
*/
public int getLimit() {
return limit;
}
/**
* To get offset number
*
* @return long offset
*/
public long getOffset() {
return offset;
}
/**
* To set limit number
*
* @param limit int
*/
/**
* To set limit number and offset number
*
* @param offset long
* @param limit int
*/
public void withOffsetAndLimit(long offset, int limit) {
if (this.limit == 0) {
this.limit = limit;
}
this.offset = offset;
}
/**
* With Sort
*
* @param sort Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Sort sort) {
if (sort.isSorted()) {
this.sort = sort.and(this.sort);
}
return this;
}
/**
* With Sort
*
* @param pageable Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Pageable pageable) {
Assert.notNull(pageable, "pageable should not be null");
this.pageable = pageable;
return this;
}
private boolean isCrossPartitionQuery(@NonNull String keyName) {
Assert.hasText(keyName, "PartitionKey should have text.");
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, keyName);
return criteria.map(criteria1 -> {
if (isEqualCriteria(criteria1)) {
return false;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
return collection.size() != 1;
}
return !hasKeywordAnd();
}).orElse(true);
}
private boolean hasKeywordOr() {
return this.criteria.getType() == CriteriaType.OR;
}
private boolean hasKeywordAnd() {
return this.criteria.getType() == CriteriaType.AND;
}
private boolean isEqualCriteria(Criteria criteria) {
return criteria.getType() == CriteriaType.IS_EQUAL;
}
/**
* Indicate if DocumentQuery should enable cross partition query.
*
* @param partitionKeys The list of partitionKey names.
* @return If DocumentQuery should enable cross partition query
*/
public boolean isCrossPartitionQuery(@NonNull List<String> partitionKeys) {
if (partitionKeys.isEmpty()) {
return true;
}
return partitionKeys.stream().filter(this::isCrossPartitionQuery)
.findFirst()
.map(p -> true)
.orElse(hasKeywordOr());
}
/**
* Returns true if this criteria or sub-criteria has partition key field present as one of the subjects.
* @param partitionKeyFieldName partition key field name
* @return returns true if this criteria or sub criteria has partition key field present as one of the subjects.
*/
public boolean hasPartitionKeyCriteria(@NonNull String partitionKeyFieldName) {
if (partitionKeyFieldName.isEmpty()) {
return false;
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.isPresent();
}
/**
* Returns partition key value based on the criteria.
* @param domainType domain type
* @param <T> entity class type
* @return Optional of partition key value
*/
public <T> Optional<Object> getPartitionKeyValue(@NonNull Class<T> domainType) {
CosmosEntityInformation<?, ?> instance = CosmosEntityInformation.getInstance(domainType);
String partitionKeyFieldName = instance.getPartitionKeyFieldName();
if (partitionKeyFieldName == null
|| partitionKeyFieldName.isEmpty()
|| isCrossPartitionQuery(Collections.singletonList(partitionKeyFieldName))) {
return Optional.empty();
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.map(criteria1 -> {
if (!criteria1.getIgnoreCase().equals(Part.IgnoreCaseType.NEVER)) {
return null;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
if (collection.size() == 1) {
return collection.iterator().next();
} else {
return null;
}
}
return criteria1.getSubjectValues().get(0);
});
}
/**
* To get criteria by type
*
* @param criteriaType the criteria type
* @return Optional
*/
public Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType) {
return getCriteriaByType(criteriaType, this.criteria);
}
private Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType, @NonNull Criteria criteria) {
if (criteria.getType().equals(criteriaType)) {
return Optional.of(criteria);
}
for (final Criteria subCriteria : criteria.getSubCriteria()) {
if (getCriteriaByType(criteriaType, subCriteria).isPresent()) {
return Optional.of(subCriteria);
}
}
return Optional.empty();
}
private Optional<Criteria> getSubjectCriteria(@NonNull Criteria criteria, @NonNull String keyName) {
if (keyName.equals(criteria.getSubject())) {
return Optional.of(criteria);
}
final List<Criteria> subCriteriaList = criteria.getSubCriteria();
for (final Criteria c : subCriteriaList) {
final Optional<Criteria> subjectCriteria = getSubjectCriteria(c, keyName);
if (subjectCriteria.isPresent()) {
return subjectCriteria;
}
}
return Optional.empty();
}
} | class CosmosQuery {
private final Criteria criteria;
private Sort sort = Sort.unsorted();
private Pageable pageable = Pageable.unpaged();
private int limit;
private long offset;
/**
* Initialization
*
* @param criteria object
*/
public CosmosQuery(@NonNull Criteria criteria) {
this.criteria = criteria;
}
/**
* To get Criteria object
*
* @return Criteria
*/
public Criteria getCriteria() {
return criteria;
}
/**
* To get Sort object
*
* @return Sort
*/
public Sort getSort() {
return sort;
}
/**
* To get Pageable object
*
* @return Pageable
*/
public Pageable getPageable() {
return pageable;
}
/**
* To get limit number
*
* @return int limit
*/
public int getLimit() {
return limit;
}
/**
* To get offset number
*
* @return long offset
*/
public long getOffset() {
return offset;
}
/**
* To set limit number
*
* @param limit int
* @deprecated use withLimit instead
*/
@Deprecated
public void setLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
}
/**
* To set limit number
*
* @param limit int
*/
/**
* To set limit number and offset number
*
* @param offset long
* @param limit int
*/
public void withOffsetAndLimit(long offset, int limit) {
if (this.limit == 0) {
this.limit = limit;
}
if (this.offset == 0) {
this.offset = offset;
}
}
/**
* With Sort
*
* @param sort Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Sort sort) {
if (sort.isSorted()) {
this.sort = sort.and(this.sort);
}
return this;
}
/**
* With Sort
*
* @param pageable Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Pageable pageable) {
Assert.notNull(pageable, "pageable should not be null");
this.pageable = pageable;
return this;
}
private boolean isCrossPartitionQuery(@NonNull String keyName) {
Assert.hasText(keyName, "PartitionKey should have text.");
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, keyName);
return criteria.map(criteria1 -> {
if (isEqualCriteria(criteria1)) {
return false;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
return collection.size() != 1;
}
return !hasKeywordAnd();
}).orElse(true);
}
private boolean hasKeywordOr() {
return this.criteria.getType() == CriteriaType.OR;
}
private boolean hasKeywordAnd() {
return this.criteria.getType() == CriteriaType.AND;
}
private boolean isEqualCriteria(Criteria criteria) {
return criteria.getType() == CriteriaType.IS_EQUAL;
}
/**
* Indicate if DocumentQuery should enable cross partition query.
*
* @param partitionKeys The list of partitionKey names.
* @return If DocumentQuery should enable cross partition query
*/
public boolean isCrossPartitionQuery(@NonNull List<String> partitionKeys) {
if (partitionKeys.isEmpty()) {
return true;
}
return partitionKeys.stream().filter(this::isCrossPartitionQuery)
.findFirst()
.map(p -> true)
.orElse(hasKeywordOr());
}
/**
* Returns true if this criteria or sub-criteria has partition key field present as one of the subjects.
* @param partitionKeyFieldName partition key field name
* @return returns true if this criteria or sub criteria has partition key field present as one of the subjects.
*/
public boolean hasPartitionKeyCriteria(@NonNull String partitionKeyFieldName) {
if (partitionKeyFieldName.isEmpty()) {
return false;
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.isPresent();
}
/**
* Returns partition key value based on the criteria.
* @param domainType domain type
* @param <T> entity class type
* @return Optional of partition key value
*/
public <T> Optional<Object> getPartitionKeyValue(@NonNull Class<T> domainType) {
CosmosEntityInformation<?, ?> instance = CosmosEntityInformation.getInstance(domainType);
String partitionKeyFieldName = instance.getPartitionKeyFieldName();
if (partitionKeyFieldName == null
|| partitionKeyFieldName.isEmpty()
|| isCrossPartitionQuery(Collections.singletonList(partitionKeyFieldName))) {
return Optional.empty();
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.map(criteria1 -> {
if (!criteria1.getIgnoreCase().equals(Part.IgnoreCaseType.NEVER)) {
return null;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
if (collection.size() == 1) {
return collection.iterator().next();
} else {
return null;
}
}
return criteria1.getSubjectValues().get(0);
});
}
/**
* To get criteria by type
*
* @param criteriaType the criteria type
* @return Optional
*/
public Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType) {
return getCriteriaByType(criteriaType, this.criteria);
}
private Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType, @NonNull Criteria criteria) {
if (criteria.getType().equals(criteriaType)) {
return Optional.of(criteria);
}
for (final Criteria subCriteria : criteria.getSubCriteria()) {
if (getCriteriaByType(criteriaType, subCriteria).isPresent()) {
return Optional.of(subCriteria);
}
}
return Optional.empty();
}
private Optional<Criteria> getSubjectCriteria(@NonNull Criteria criteria, @NonNull String keyName) {
if (keyName.equals(criteria.getSubject())) {
return Optional.of(criteria);
}
final List<Criteria> subCriteriaList = criteria.getSubCriteria();
for (final Criteria c : subCriteriaList) {
final Optional<Criteria> subjectCriteria = getSubjectCriteria(c, keyName);
if (subjectCriteria.isPresent()) {
return subjectCriteria;
}
}
return Optional.empty();
}
} |
This logic is there so that you can only set the limit once. So for example if you run .withLimit(2) and then .withLimit(4), the 2 is what will be honored. | public void withLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
} | if (this.limit == 0) { | public void withLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
} | class CosmosQuery {
private final Criteria criteria;
private Sort sort = Sort.unsorted();
private Pageable pageable = Pageable.unpaged();
private int limit;
private long offset;
/**
* Initialization
*
* @param criteria object
*/
public CosmosQuery(@NonNull Criteria criteria) {
this.criteria = criteria;
}
/**
* To get Criteria object
*
* @return Criteria
*/
public Criteria getCriteria() {
return criteria;
}
/**
* To get Sort object
*
* @return Sort
*/
public Sort getSort() {
return sort;
}
/**
* To get Pageable object
*
* @return Pageable
*/
public Pageable getPageable() {
return pageable;
}
/**
* To get limit number
*
* @return int limit
*/
public int getLimit() {
return limit;
}
/**
* To get offset number
*
* @return long offset
*/
public long getOffset() {
return offset;
}
/**
* To set limit number
*
* @param limit int
*/
/**
* To set limit number and offset number
*
* @param offset long
* @param limit int
*/
public void withOffsetAndLimit(long offset, int limit) {
if (this.limit == 0) {
this.limit = limit;
}
this.offset = offset;
}
/**
* With Sort
*
* @param sort Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Sort sort) {
if (sort.isSorted()) {
this.sort = sort.and(this.sort);
}
return this;
}
/**
* With Sort
*
* @param pageable Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Pageable pageable) {
Assert.notNull(pageable, "pageable should not be null");
this.pageable = pageable;
return this;
}
private boolean isCrossPartitionQuery(@NonNull String keyName) {
Assert.hasText(keyName, "PartitionKey should have text.");
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, keyName);
return criteria.map(criteria1 -> {
if (isEqualCriteria(criteria1)) {
return false;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
return collection.size() != 1;
}
return !hasKeywordAnd();
}).orElse(true);
}
private boolean hasKeywordOr() {
return this.criteria.getType() == CriteriaType.OR;
}
private boolean hasKeywordAnd() {
return this.criteria.getType() == CriteriaType.AND;
}
private boolean isEqualCriteria(Criteria criteria) {
return criteria.getType() == CriteriaType.IS_EQUAL;
}
/**
* Indicate if DocumentQuery should enable cross partition query.
*
* @param partitionKeys The list of partitionKey names.
* @return If DocumentQuery should enable cross partition query
*/
public boolean isCrossPartitionQuery(@NonNull List<String> partitionKeys) {
if (partitionKeys.isEmpty()) {
return true;
}
return partitionKeys.stream().filter(this::isCrossPartitionQuery)
.findFirst()
.map(p -> true)
.orElse(hasKeywordOr());
}
/**
* Returns true if this criteria or sub-criteria has partition key field present as one of the subjects.
* @param partitionKeyFieldName partition key field name
* @return returns true if this criteria or sub criteria has partition key field present as one of the subjects.
*/
public boolean hasPartitionKeyCriteria(@NonNull String partitionKeyFieldName) {
if (partitionKeyFieldName.isEmpty()) {
return false;
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.isPresent();
}
/**
* Returns partition key value based on the criteria.
* @param domainType domain type
* @param <T> entity class type
* @return Optional of partition key value
*/
public <T> Optional<Object> getPartitionKeyValue(@NonNull Class<T> domainType) {
CosmosEntityInformation<?, ?> instance = CosmosEntityInformation.getInstance(domainType);
String partitionKeyFieldName = instance.getPartitionKeyFieldName();
if (partitionKeyFieldName == null
|| partitionKeyFieldName.isEmpty()
|| isCrossPartitionQuery(Collections.singletonList(partitionKeyFieldName))) {
return Optional.empty();
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.map(criteria1 -> {
if (!criteria1.getIgnoreCase().equals(Part.IgnoreCaseType.NEVER)) {
return null;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
if (collection.size() == 1) {
return collection.iterator().next();
} else {
return null;
}
}
return criteria1.getSubjectValues().get(0);
});
}
/**
* To get criteria by type
*
* @param criteriaType the criteria type
* @return Optional
*/
public Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType) {
return getCriteriaByType(criteriaType, this.criteria);
}
private Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType, @NonNull Criteria criteria) {
if (criteria.getType().equals(criteriaType)) {
return Optional.of(criteria);
}
for (final Criteria subCriteria : criteria.getSubCriteria()) {
if (getCriteriaByType(criteriaType, subCriteria).isPresent()) {
return Optional.of(subCriteria);
}
}
return Optional.empty();
}
private Optional<Criteria> getSubjectCriteria(@NonNull Criteria criteria, @NonNull String keyName) {
if (keyName.equals(criteria.getSubject())) {
return Optional.of(criteria);
}
final List<Criteria> subCriteriaList = criteria.getSubCriteria();
for (final Criteria c : subCriteriaList) {
final Optional<Criteria> subjectCriteria = getSubjectCriteria(c, keyName);
if (subjectCriteria.isPresent()) {
return subjectCriteria;
}
}
return Optional.empty();
}
} | class CosmosQuery {
private final Criteria criteria;
private Sort sort = Sort.unsorted();
private Pageable pageable = Pageable.unpaged();
private int limit;
private long offset;
/**
* Initialization
*
* @param criteria object
*/
public CosmosQuery(@NonNull Criteria criteria) {
this.criteria = criteria;
}
/**
* To get Criteria object
*
* @return Criteria
*/
public Criteria getCriteria() {
return criteria;
}
/**
* To get Sort object
*
* @return Sort
*/
public Sort getSort() {
return sort;
}
/**
* To get Pageable object
*
* @return Pageable
*/
public Pageable getPageable() {
return pageable;
}
/**
* To get limit number
*
* @return int limit
*/
public int getLimit() {
return limit;
}
/**
* To get offset number
*
* @return long offset
*/
public long getOffset() {
return offset;
}
/**
* To set limit number
*
* @param limit int
* @deprecated use withLimit instead
*/
@Deprecated
public void setLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
}
/**
* To set limit number
*
* @param limit int
*/
/**
* To set limit number and offset number
*
* @param offset long
* @param limit int
*/
public void withOffsetAndLimit(long offset, int limit) {
if (this.limit == 0) {
this.limit = limit;
}
if (this.offset == 0) {
this.offset = offset;
}
}
/**
* With Sort
*
* @param sort Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Sort sort) {
if (sort.isSorted()) {
this.sort = sort.and(this.sort);
}
return this;
}
/**
* With Sort
*
* @param pageable Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Pageable pageable) {
Assert.notNull(pageable, "pageable should not be null");
this.pageable = pageable;
return this;
}
private boolean isCrossPartitionQuery(@NonNull String keyName) {
Assert.hasText(keyName, "PartitionKey should have text.");
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, keyName);
return criteria.map(criteria1 -> {
if (isEqualCriteria(criteria1)) {
return false;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
return collection.size() != 1;
}
return !hasKeywordAnd();
}).orElse(true);
}
private boolean hasKeywordOr() {
return this.criteria.getType() == CriteriaType.OR;
}
private boolean hasKeywordAnd() {
return this.criteria.getType() == CriteriaType.AND;
}
private boolean isEqualCriteria(Criteria criteria) {
return criteria.getType() == CriteriaType.IS_EQUAL;
}
/**
* Indicate if DocumentQuery should enable cross partition query.
*
* @param partitionKeys The list of partitionKey names.
* @return If DocumentQuery should enable cross partition query
*/
public boolean isCrossPartitionQuery(@NonNull List<String> partitionKeys) {
if (partitionKeys.isEmpty()) {
return true;
}
return partitionKeys.stream().filter(this::isCrossPartitionQuery)
.findFirst()
.map(p -> true)
.orElse(hasKeywordOr());
}
/**
* Returns true if this criteria or sub-criteria has partition key field present as one of the subjects.
* @param partitionKeyFieldName partition key field name
* @return returns true if this criteria or sub criteria has partition key field present as one of the subjects.
*/
public boolean hasPartitionKeyCriteria(@NonNull String partitionKeyFieldName) {
if (partitionKeyFieldName.isEmpty()) {
return false;
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.isPresent();
}
/**
* Returns partition key value based on the criteria.
* @param domainType domain type
* @param <T> entity class type
* @return Optional of partition key value
*/
public <T> Optional<Object> getPartitionKeyValue(@NonNull Class<T> domainType) {
CosmosEntityInformation<?, ?> instance = CosmosEntityInformation.getInstance(domainType);
String partitionKeyFieldName = instance.getPartitionKeyFieldName();
if (partitionKeyFieldName == null
|| partitionKeyFieldName.isEmpty()
|| isCrossPartitionQuery(Collections.singletonList(partitionKeyFieldName))) {
return Optional.empty();
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.map(criteria1 -> {
if (!criteria1.getIgnoreCase().equals(Part.IgnoreCaseType.NEVER)) {
return null;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
if (collection.size() == 1) {
return collection.iterator().next();
} else {
return null;
}
}
return criteria1.getSubjectValues().get(0);
});
}
/**
* To get criteria by type
*
* @param criteriaType the criteria type
* @return Optional
*/
public Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType) {
return getCriteriaByType(criteriaType, this.criteria);
}
private Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType, @NonNull Criteria criteria) {
if (criteria.getType().equals(criteriaType)) {
return Optional.of(criteria);
}
for (final Criteria subCriteria : criteria.getSubCriteria()) {
if (getCriteriaByType(criteriaType, subCriteria).isPresent()) {
return Optional.of(subCriteria);
}
}
return Optional.empty();
}
private Optional<Criteria> getSubjectCriteria(@NonNull Criteria criteria, @NonNull String keyName) {
if (keyName.equals(criteria.getSubject())) {
return Optional.of(criteria);
}
final List<Criteria> subCriteriaList = criteria.getSubCriteria();
for (final Criteria c : subCriteriaList) {
final Optional<Criteria> subjectCriteria = getSubjectCriteria(c, keyName);
if (subjectCriteria.isPresent()) {
return subjectCriteria;
}
}
return Optional.empty();
}
} |
do we need to do the same checking as limit? (can only set once?) | public void withOffsetAndLimit(long offset, int limit) {
if (this.limit == 0) {
this.limit = limit;
}
this.offset = offset;
} | this.offset = offset; | public void withOffsetAndLimit(long offset, int limit) {
if (this.limit == 0) {
this.limit = limit;
}
if (this.offset == 0) {
this.offset = offset;
}
} | class CosmosQuery {
private final Criteria criteria;
private Sort sort = Sort.unsorted();
private Pageable pageable = Pageable.unpaged();
private int limit;
private long offset;
/**
* Initialization
*
* @param criteria object
*/
public CosmosQuery(@NonNull Criteria criteria) {
this.criteria = criteria;
}
/**
* To get Criteria object
*
* @return Criteria
*/
public Criteria getCriteria() {
return criteria;
}
/**
* To get Sort object
*
* @return Sort
*/
public Sort getSort() {
return sort;
}
/**
* To get Pageable object
*
* @return Pageable
*/
public Pageable getPageable() {
return pageable;
}
/**
* To get limit number
*
* @return int limit
*/
public int getLimit() {
return limit;
}
/**
* To get offset number
*
* @return long offset
*/
public long getOffset() {
return offset;
}
/**
* To set limit number
*
* @param limit int
* @deprecated use withLimit instead
*/
@Deprecated
public void setLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
}
/**
* To set limit number
*
* @param limit int
*/
public void withLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
}
/**
* To set limit number and offset number
*
* @param offset long
* @param limit int
*/
/**
* With Sort
*
* @param sort Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Sort sort) {
if (sort.isSorted()) {
this.sort = sort.and(this.sort);
}
return this;
}
/**
* With Sort
*
* @param pageable Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Pageable pageable) {
Assert.notNull(pageable, "pageable should not be null");
this.pageable = pageable;
return this;
}
private boolean isCrossPartitionQuery(@NonNull String keyName) {
Assert.hasText(keyName, "PartitionKey should have text.");
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, keyName);
return criteria.map(criteria1 -> {
if (isEqualCriteria(criteria1)) {
return false;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
return collection.size() != 1;
}
return !hasKeywordAnd();
}).orElse(true);
}
private boolean hasKeywordOr() {
return this.criteria.getType() == CriteriaType.OR;
}
private boolean hasKeywordAnd() {
return this.criteria.getType() == CriteriaType.AND;
}
private boolean isEqualCriteria(Criteria criteria) {
return criteria.getType() == CriteriaType.IS_EQUAL;
}
/**
* Indicate if DocumentQuery should enable cross partition query.
*
* @param partitionKeys The list of partitionKey names.
* @return If DocumentQuery should enable cross partition query
*/
public boolean isCrossPartitionQuery(@NonNull List<String> partitionKeys) {
if (partitionKeys.isEmpty()) {
return true;
}
return partitionKeys.stream().filter(this::isCrossPartitionQuery)
.findFirst()
.map(p -> true)
.orElse(hasKeywordOr());
}
/**
* Returns true if this criteria or sub-criteria has partition key field present as one of the subjects.
* @param partitionKeyFieldName partition key field name
* @return returns true if this criteria or sub criteria has partition key field present as one of the subjects.
*/
public boolean hasPartitionKeyCriteria(@NonNull String partitionKeyFieldName) {
if (partitionKeyFieldName.isEmpty()) {
return false;
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.isPresent();
}
/**
* Returns partition key value based on the criteria.
* @param domainType domain type
* @param <T> entity class type
* @return Optional of partition key value
*/
public <T> Optional<Object> getPartitionKeyValue(@NonNull Class<T> domainType) {
CosmosEntityInformation<?, ?> instance = CosmosEntityInformation.getInstance(domainType);
String partitionKeyFieldName = instance.getPartitionKeyFieldName();
if (partitionKeyFieldName == null
|| partitionKeyFieldName.isEmpty()
|| isCrossPartitionQuery(Collections.singletonList(partitionKeyFieldName))) {
return Optional.empty();
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.map(criteria1 -> {
if (!criteria1.getIgnoreCase().equals(Part.IgnoreCaseType.NEVER)) {
return null;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
if (collection.size() == 1) {
return collection.iterator().next();
} else {
return null;
}
}
return criteria1.getSubjectValues().get(0);
});
}
/**
* To get criteria by type
*
* @param criteriaType the criteria type
* @return Optional
*/
public Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType) {
return getCriteriaByType(criteriaType, this.criteria);
}
private Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType, @NonNull Criteria criteria) {
if (criteria.getType().equals(criteriaType)) {
return Optional.of(criteria);
}
for (final Criteria subCriteria : criteria.getSubCriteria()) {
if (getCriteriaByType(criteriaType, subCriteria).isPresent()) {
return Optional.of(subCriteria);
}
}
return Optional.empty();
}
private Optional<Criteria> getSubjectCriteria(@NonNull Criteria criteria, @NonNull String keyName) {
if (keyName.equals(criteria.getSubject())) {
return Optional.of(criteria);
}
final List<Criteria> subCriteriaList = criteria.getSubCriteria();
for (final Criteria c : subCriteriaList) {
final Optional<Criteria> subjectCriteria = getSubjectCriteria(c, keyName);
if (subjectCriteria.isPresent()) {
return subjectCriteria;
}
}
return Optional.empty();
}
} | class CosmosQuery {
private final Criteria criteria;
private Sort sort = Sort.unsorted();
private Pageable pageable = Pageable.unpaged();
private int limit;
private long offset;
/**
* Initialization
*
* @param criteria object
*/
public CosmosQuery(@NonNull Criteria criteria) {
this.criteria = criteria;
}
/**
* To get Criteria object
*
* @return Criteria
*/
public Criteria getCriteria() {
return criteria;
}
/**
* To get Sort object
*
* @return Sort
*/
public Sort getSort() {
return sort;
}
/**
* To get Pageable object
*
* @return Pageable
*/
public Pageable getPageable() {
return pageable;
}
/**
* To get limit number
*
* @return int limit
*/
public int getLimit() {
return limit;
}
/**
* To get offset number
*
* @return long offset
*/
public long getOffset() {
return offset;
}
/**
* To set limit number
*
* @param limit int
* @deprecated use withLimit instead
*/
@Deprecated
public void setLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
}
/**
* To set limit number
*
* @param limit int
*/
public void withLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
}
/**
* To set limit number and offset number
*
* @param offset long
* @param limit int
*/
/**
* With Sort
*
* @param sort Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Sort sort) {
if (sort.isSorted()) {
this.sort = sort.and(this.sort);
}
return this;
}
/**
* With Sort
*
* @param pageable Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Pageable pageable) {
Assert.notNull(pageable, "pageable should not be null");
this.pageable = pageable;
return this;
}
private boolean isCrossPartitionQuery(@NonNull String keyName) {
Assert.hasText(keyName, "PartitionKey should have text.");
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, keyName);
return criteria.map(criteria1 -> {
if (isEqualCriteria(criteria1)) {
return false;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
return collection.size() != 1;
}
return !hasKeywordAnd();
}).orElse(true);
}
private boolean hasKeywordOr() {
return this.criteria.getType() == CriteriaType.OR;
}
private boolean hasKeywordAnd() {
return this.criteria.getType() == CriteriaType.AND;
}
private boolean isEqualCriteria(Criteria criteria) {
return criteria.getType() == CriteriaType.IS_EQUAL;
}
/**
* Indicate if DocumentQuery should enable cross partition query.
*
* @param partitionKeys The list of partitionKey names.
* @return If DocumentQuery should enable cross partition query
*/
public boolean isCrossPartitionQuery(@NonNull List<String> partitionKeys) {
if (partitionKeys.isEmpty()) {
return true;
}
return partitionKeys.stream().filter(this::isCrossPartitionQuery)
.findFirst()
.map(p -> true)
.orElse(hasKeywordOr());
}
/**
* Returns true if this criteria or sub-criteria has partition key field present as one of the subjects.
* @param partitionKeyFieldName partition key field name
* @return returns true if this criteria or sub criteria has partition key field present as one of the subjects.
*/
public boolean hasPartitionKeyCriteria(@NonNull String partitionKeyFieldName) {
if (partitionKeyFieldName.isEmpty()) {
return false;
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.isPresent();
}
/**
* Returns partition key value based on the criteria.
* @param domainType domain type
* @param <T> entity class type
* @return Optional of partition key value
*/
public <T> Optional<Object> getPartitionKeyValue(@NonNull Class<T> domainType) {
CosmosEntityInformation<?, ?> instance = CosmosEntityInformation.getInstance(domainType);
String partitionKeyFieldName = instance.getPartitionKeyFieldName();
if (partitionKeyFieldName == null
|| partitionKeyFieldName.isEmpty()
|| isCrossPartitionQuery(Collections.singletonList(partitionKeyFieldName))) {
return Optional.empty();
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.map(criteria1 -> {
if (!criteria1.getIgnoreCase().equals(Part.IgnoreCaseType.NEVER)) {
return null;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
if (collection.size() == 1) {
return collection.iterator().next();
} else {
return null;
}
}
return criteria1.getSubjectValues().get(0);
});
}
/**
* To get criteria by type
*
* @param criteriaType the criteria type
* @return Optional
*/
public Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType) {
return getCriteriaByType(criteriaType, this.criteria);
}
private Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType, @NonNull Criteria criteria) {
if (criteria.getType().equals(criteriaType)) {
return Optional.of(criteria);
}
for (final Criteria subCriteria : criteria.getSubCriteria()) {
if (getCriteriaByType(criteriaType, subCriteria).isPresent()) {
return Optional.of(subCriteria);
}
}
return Optional.empty();
}
private Optional<Criteria> getSubjectCriteria(@NonNull Criteria criteria, @NonNull String keyName) {
if (keyName.equals(criteria.getSubject())) {
return Optional.of(criteria);
}
final List<Criteria> subCriteriaList = criteria.getSubCriteria();
for (final Criteria c : subCriteriaList) {
final Optional<Criteria> subjectCriteria = getSubjectCriteria(c, keyName);
if (subjectCriteria.isPresent()) {
return subjectCriteria;
}
}
return Optional.empty();
}
} |
Done! | public void withOffsetAndLimit(long offset, int limit) {
if (this.limit == 0) {
this.limit = limit;
}
this.offset = offset;
} | this.offset = offset; | public void withOffsetAndLimit(long offset, int limit) {
if (this.limit == 0) {
this.limit = limit;
}
if (this.offset == 0) {
this.offset = offset;
}
} | class CosmosQuery {
private final Criteria criteria;
private Sort sort = Sort.unsorted();
private Pageable pageable = Pageable.unpaged();
private int limit;
private long offset;
/**
* Initialization
*
* @param criteria object
*/
public CosmosQuery(@NonNull Criteria criteria) {
this.criteria = criteria;
}
/**
* To get Criteria object
*
* @return Criteria
*/
public Criteria getCriteria() {
return criteria;
}
/**
* To get Sort object
*
* @return Sort
*/
public Sort getSort() {
return sort;
}
/**
* To get Pageable object
*
* @return Pageable
*/
public Pageable getPageable() {
return pageable;
}
/**
* To get limit number
*
* @return int limit
*/
public int getLimit() {
return limit;
}
/**
* To get offset number
*
* @return long offset
*/
public long getOffset() {
return offset;
}
/**
* To set limit number
*
* @param limit int
* @deprecated use withLimit instead
*/
@Deprecated
public void setLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
}
/**
* To set limit number
*
* @param limit int
*/
public void withLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
}
/**
* To set limit number and offset number
*
* @param offset long
* @param limit int
*/
/**
* With Sort
*
* @param sort Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Sort sort) {
if (sort.isSorted()) {
this.sort = sort.and(this.sort);
}
return this;
}
/**
* With Sort
*
* @param pageable Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Pageable pageable) {
Assert.notNull(pageable, "pageable should not be null");
this.pageable = pageable;
return this;
}
private boolean isCrossPartitionQuery(@NonNull String keyName) {
Assert.hasText(keyName, "PartitionKey should have text.");
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, keyName);
return criteria.map(criteria1 -> {
if (isEqualCriteria(criteria1)) {
return false;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
return collection.size() != 1;
}
return !hasKeywordAnd();
}).orElse(true);
}
private boolean hasKeywordOr() {
return this.criteria.getType() == CriteriaType.OR;
}
private boolean hasKeywordAnd() {
return this.criteria.getType() == CriteriaType.AND;
}
private boolean isEqualCriteria(Criteria criteria) {
return criteria.getType() == CriteriaType.IS_EQUAL;
}
/**
* Indicate if DocumentQuery should enable cross partition query.
*
* @param partitionKeys The list of partitionKey names.
* @return If DocumentQuery should enable cross partition query
*/
public boolean isCrossPartitionQuery(@NonNull List<String> partitionKeys) {
if (partitionKeys.isEmpty()) {
return true;
}
return partitionKeys.stream().filter(this::isCrossPartitionQuery)
.findFirst()
.map(p -> true)
.orElse(hasKeywordOr());
}
/**
* Returns true if this criteria or sub-criteria has partition key field present as one of the subjects.
* @param partitionKeyFieldName partition key field name
* @return returns true if this criteria or sub criteria has partition key field present as one of the subjects.
*/
public boolean hasPartitionKeyCriteria(@NonNull String partitionKeyFieldName) {
if (partitionKeyFieldName.isEmpty()) {
return false;
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.isPresent();
}
/**
* Returns partition key value based on the criteria.
* @param domainType domain type
* @param <T> entity class type
* @return Optional of partition key value
*/
public <T> Optional<Object> getPartitionKeyValue(@NonNull Class<T> domainType) {
CosmosEntityInformation<?, ?> instance = CosmosEntityInformation.getInstance(domainType);
String partitionKeyFieldName = instance.getPartitionKeyFieldName();
if (partitionKeyFieldName == null
|| partitionKeyFieldName.isEmpty()
|| isCrossPartitionQuery(Collections.singletonList(partitionKeyFieldName))) {
return Optional.empty();
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.map(criteria1 -> {
if (!criteria1.getIgnoreCase().equals(Part.IgnoreCaseType.NEVER)) {
return null;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
if (collection.size() == 1) {
return collection.iterator().next();
} else {
return null;
}
}
return criteria1.getSubjectValues().get(0);
});
}
/**
* To get criteria by type
*
* @param criteriaType the criteria type
* @return Optional
*/
public Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType) {
return getCriteriaByType(criteriaType, this.criteria);
}
private Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType, @NonNull Criteria criteria) {
if (criteria.getType().equals(criteriaType)) {
return Optional.of(criteria);
}
for (final Criteria subCriteria : criteria.getSubCriteria()) {
if (getCriteriaByType(criteriaType, subCriteria).isPresent()) {
return Optional.of(subCriteria);
}
}
return Optional.empty();
}
private Optional<Criteria> getSubjectCriteria(@NonNull Criteria criteria, @NonNull String keyName) {
if (keyName.equals(criteria.getSubject())) {
return Optional.of(criteria);
}
final List<Criteria> subCriteriaList = criteria.getSubCriteria();
for (final Criteria c : subCriteriaList) {
final Optional<Criteria> subjectCriteria = getSubjectCriteria(c, keyName);
if (subjectCriteria.isPresent()) {
return subjectCriteria;
}
}
return Optional.empty();
}
} | class CosmosQuery {
private final Criteria criteria;
private Sort sort = Sort.unsorted();
private Pageable pageable = Pageable.unpaged();
private int limit;
private long offset;
/**
* Initialization
*
* @param criteria object
*/
public CosmosQuery(@NonNull Criteria criteria) {
this.criteria = criteria;
}
/**
* To get Criteria object
*
* @return Criteria
*/
public Criteria getCriteria() {
return criteria;
}
/**
* To get Sort object
*
* @return Sort
*/
public Sort getSort() {
return sort;
}
/**
* To get Pageable object
*
* @return Pageable
*/
public Pageable getPageable() {
return pageable;
}
/**
* To get limit number
*
* @return int limit
*/
public int getLimit() {
return limit;
}
/**
* To get offset number
*
* @return long offset
*/
public long getOffset() {
return offset;
}
/**
* To set limit number
*
* @param limit int
* @deprecated use withLimit instead
*/
@Deprecated
public void setLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
}
/**
* To set limit number
*
* @param limit int
*/
public void withLimit(int limit) {
if (this.limit == 0) {
this.limit = limit;
}
}
/**
* To set limit number and offset number
*
* @param offset long
* @param limit int
*/
/**
* With Sort
*
* @param sort Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Sort sort) {
if (sort.isSorted()) {
this.sort = sort.and(this.sort);
}
return this;
}
/**
* With Sort
*
* @param pageable Sort
* @return DocumentQuery object
*/
public CosmosQuery with(@NonNull Pageable pageable) {
Assert.notNull(pageable, "pageable should not be null");
this.pageable = pageable;
return this;
}
private boolean isCrossPartitionQuery(@NonNull String keyName) {
Assert.hasText(keyName, "PartitionKey should have text.");
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, keyName);
return criteria.map(criteria1 -> {
if (isEqualCriteria(criteria1)) {
return false;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
return collection.size() != 1;
}
return !hasKeywordAnd();
}).orElse(true);
}
private boolean hasKeywordOr() {
return this.criteria.getType() == CriteriaType.OR;
}
private boolean hasKeywordAnd() {
return this.criteria.getType() == CriteriaType.AND;
}
private boolean isEqualCriteria(Criteria criteria) {
return criteria.getType() == CriteriaType.IS_EQUAL;
}
/**
* Indicate if DocumentQuery should enable cross partition query.
*
* @param partitionKeys The list of partitionKey names.
* @return If DocumentQuery should enable cross partition query
*/
public boolean isCrossPartitionQuery(@NonNull List<String> partitionKeys) {
if (partitionKeys.isEmpty()) {
return true;
}
return partitionKeys.stream().filter(this::isCrossPartitionQuery)
.findFirst()
.map(p -> true)
.orElse(hasKeywordOr());
}
/**
* Returns true if this criteria or sub-criteria has partition key field present as one of the subjects.
* @param partitionKeyFieldName partition key field name
* @return returns true if this criteria or sub criteria has partition key field present as one of the subjects.
*/
public boolean hasPartitionKeyCriteria(@NonNull String partitionKeyFieldName) {
if (partitionKeyFieldName.isEmpty()) {
return false;
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.isPresent();
}
/**
* Returns partition key value based on the criteria.
* @param domainType domain type
* @param <T> entity class type
* @return Optional of partition key value
*/
public <T> Optional<Object> getPartitionKeyValue(@NonNull Class<T> domainType) {
CosmosEntityInformation<?, ?> instance = CosmosEntityInformation.getInstance(domainType);
String partitionKeyFieldName = instance.getPartitionKeyFieldName();
if (partitionKeyFieldName == null
|| partitionKeyFieldName.isEmpty()
|| isCrossPartitionQuery(Collections.singletonList(partitionKeyFieldName))) {
return Optional.empty();
}
final Optional<Criteria> criteria = this.getSubjectCriteria(this.criteria, partitionKeyFieldName);
return criteria.map(criteria1 -> {
if (!criteria1.getIgnoreCase().equals(Part.IgnoreCaseType.NEVER)) {
return null;
}
if (criteria1.getType() == CriteriaType.IN && criteria1.getSubjectValues().size() == 1) {
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) criteria1.getSubjectValues().get(0);
if (collection.size() == 1) {
return collection.iterator().next();
} else {
return null;
}
}
return criteria1.getSubjectValues().get(0);
});
}
/**
* To get criteria by type
*
* @param criteriaType the criteria type
* @return Optional
*/
public Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType) {
return getCriteriaByType(criteriaType, this.criteria);
}
private Optional<Criteria> getCriteriaByType(@NonNull CriteriaType criteriaType, @NonNull Criteria criteria) {
if (criteria.getType().equals(criteriaType)) {
return Optional.of(criteria);
}
for (final Criteria subCriteria : criteria.getSubCriteria()) {
if (getCriteriaByType(criteriaType, subCriteria).isPresent()) {
return Optional.of(subCriteria);
}
}
return Optional.empty();
}
private Optional<Criteria> getSubjectCriteria(@NonNull Criteria criteria, @NonNull String keyName) {
if (keyName.equals(criteria.getSubject())) {
return Optional.of(criteria);
}
final List<Criteria> subCriteriaList = criteria.getSubCriteria();
for (final Criteria c : subCriteriaList) {
final Optional<Criteria> subjectCriteria = getSubjectCriteria(c, keyName);
if (subjectCriteria.isPresent()) {
return subjectCriteria;
}
}
return Optional.empty();
}
} |
should this part logic located in RxGatewayStoreModel or within generatePath | private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
int documentIdPrefixPosition = -1;
boolean needToEncodeIdInUriForGateway = request.getResourceType() == ResourceType.Document &&
(documentIdPrefixPosition = resourceName.indexOf("/docs/")) > 0 &&
this.getStoreProxy(request) == this.gatewayProxy;
if (needToEncodeIdInUriForGateway) {
String encodedResourceName = resourceName.substring(0, documentIdPrefixPosition + 6) +
Strings.encodeURIComponent(resourceName.substring(documentIdPrefixPosition + 6));
if (!resourceName.equals(encodedResourceName)) {
request.setResourceAddress(encodedResourceName);
}
}
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
} | if (needToEncodeIdInUriForGateway) { | private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
if (this.getStoreProxy(request) == this.gatewayProxy) {
this.gatewayProxy.prepareRequestForAuth(request, resourceName);
}
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// ---- process-wide (static) state ---------------------------------------
// Pseudo machine id used in diagnostics; one value per JVM / classloader.
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
// Count of currently-open client instances (reported via diagnostics config).
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
// Generator for the per-instance clientId.
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Range spanning the entire effective partition-key space: [min, max).
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
    PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
    PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
// Placeholder query text; see the literal itself for intent.
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
    "ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// ---- immutable per-client configuration (set in the constructor) -------
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
// true when masterKeyOrResourceToken holds a resource token rather than a key.
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// ---- credential material (exactly one auth path is populated) ----------
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
// Package-private so tests can inspect which auth mode was selected.
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
// ---- mutable runtime state (populated during init()) -------------------
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Resource-id/full-name -> (partition key, resource token) pairs, built from the permission feed.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private ApiType apiType;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final ClientTelemetryConfig clientTelemetryConfig;
/**
 * Creates a client authenticated via master key / resource token / permission feed /
 * {@link AzureKeyCredential}, with a custom {@link CosmosAuthorizationTokenResolver}.
 * Delegates to the permission-feed constructor (passing {@code null} for the
 * TokenCredential) and then installs the resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType,
                            ClientTelemetryConfig clientTelemetryConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        permissionFeed,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        null, // no TokenCredential in this overload
        sessionCapturingOverride,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Same as the previous overload but additionally accepts a {@link TokenCredential}
 * (AAD auth). Delegates to the permission-feed constructor, then installs the
 * custom {@link CosmosAuthorizationTokenResolver}.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            TokenCredential tokenCredential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType,
                            ClientTelemetryConfig clientTelemetryConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        permissionFeed,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverride,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Permission-feed constructor: delegates to the core constructor, then builds
 * {@code resourceTokensMap} (resource id/full-name -> partition-key/token pairs)
 * from the supplied permissions.
 *
 * @throws IllegalArgumentException if a permission's resource link is empty or
 *         unparseable, or if a non-empty feed yields no usable tokens.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             ClientTelemetryConfig clientTelemetryConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig);
    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // Group tokens by the resource they grant access to.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember the first real resource token; used as a fallback auth token.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Core constructor: registers the instance for diagnostics, selects the auth
 * mode (key credential / resource token / master key / AAD), and initializes
 * connection policy, session container, HTTP client, endpoint manager and
 * retry policy. Network-dependent setup is deferred to {@link #init}.
 * On any runtime failure the partially-built client is closed before rethrowing.
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     AzureKeyCredential credential,
                     TokenCredential tokenCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled,
                     boolean contentResponseOnWriteEnabled,
                     CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                     ApiType apiType,
                     ClientTelemetryConfig clientTelemetryConfig) {
    // Bookkeeping first so diagnostics can report even a failed construction.
    activeClientsCnt.incrementAndGet();
    this.clientId = clientIdGenerator.incrementAndGet();
    this.diagnosticsClientConfig = new DiagnosticsClientConfig();
    this.diagnosticsClientConfig.withClientId(this.clientId);
    this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
    this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
    this.diagnosticsClientConfig.withConsistency(consistencyLevel);
    this.throughputControlEnabled = new AtomicBoolean(false);
    logger.info(
        "Initializing DocumentClient [{}] with"
            + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
        this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
    try {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        this.configs = configs;
        this.masterKeyOrResourceToken = masterKeyOrResourceToken;
        this.serviceEndpoint = serviceEndpoint;
        this.credential = credential;
        this.tokenCredential = tokenCredential;
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        this.authorizationTokenType = AuthorizationTokenType.Invalid;
        // Auth mode selection: exactly one branch populates the token machinery.
        if (this.credential != null) {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.authorizationTokenProvider = null;
            hasAuthKeyResourceToken = true;
            this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
        } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            // Bare master key string: wrap it in an AzureKeyCredential.
            this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = null;
            if (tokenCredential != null) {
                // NOTE(review): the scope literal below looks truncated in this copy
                // (original presumably builds "<scheme>://<host>/..."); confirm against VCS.
                this.tokenCredentialScopes = new String[] {
                    serviceEndpoint.getScheme() + ":
                };
                this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                    .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                this.authorizationTokenType = AuthorizationTokenType.AadToken;
            }
        }
        if (connectionPolicy != null) {
            this.connectionPolicy = connectionPolicy;
        } else {
            // Default to direct-mode connectivity when no policy is supplied.
            this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        }
        this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
        this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
        this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
        this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
        this.diagnosticsClientConfig.withMachineId(tempMachineId);
        // Session capturing is on for SESSION consistency or when explicitly forced.
        boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
        this.consistencyLevel = consistencyLevel;
        this.userAgentContainer = new UserAgentContainer();
        String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
        if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
            userAgentContainer.setSuffix(userAgentSuffix);
        }
        this.httpClientInterceptor = null;
        this.reactorHttpClient = httpClient();
        this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
        this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
        this.resetSessionTokenRetryPolicy = retryPolicy;
        CpuMemoryMonitor.register(this);
        this.queryPlanCache = new ConcurrentHashMap<>();
        this.apiType = apiType;
        this.clientTelemetryConfig = clientTelemetryConfig;
    } catch (RuntimeException e) {
        // Release partially-acquired resources (counters, monitor registration).
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
/**
 * Returns the diagnostics configuration captured for this client instance.
 */
@Override
public DiagnosticsClientConfig getConfig() {
    return this.diagnosticsClientConfig;
}
/**
 * Creates a fresh {@link CosmosDiagnostics} instance bound to this client.
 */
@Override
public CosmosDiagnostics createDiagnostics() {
    return BridgeInternal.createCosmosDiagnostics(this);
}
/**
 * Creates the gateway configuration reader and validates that the endpoint
 * manager has already fetched the database account; fails fast otherwise.
 * Must run after {@code globalEndpointManager.init()}.
 */
private void initializeGatewayConfigurationReader() {
    this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
    DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
    // A null account means the initial metadata call failed (bad endpoint/auth).
    // NOTE(review): the "https:" message tails below look truncated in this copy; confirm against VCS.
    if (databaseAccount == null) {
        logger.error("Client initialization failed."
            + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
        throw new RuntimeException("Client initialization failed."
            + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
    }
    this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Wires the caches and configuration resolved during {@code init()} into the
 * gateway store model.
 */
private void updateGatewayProxy() {
    RxGatewayStoreModel gatewayStoreModel = (RxGatewayStoreModel) this.gatewayProxy;
    gatewayStoreModel.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    gatewayStoreModel.setCollectionCache(this.collectionCache);
    gatewayStoreModel.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    gatewayStoreModel.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/**
 * Completes client construction with the network-dependent pieces, in a fixed
 * order: HTTP client (optionally intercepted), gateway proxy, endpoint manager,
 * gateway configuration, collection/partition-key-range caches (optionally
 * warmed from {@code metadataCachesSnapshot}), client telemetry, and finally
 * the store model (gateway or direct). Closes the client on any failure.
 */
public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
    try {
        this.httpClientInterceptor = httpClientInterceptor;
        if (httpClientInterceptor != null) {
            // Rebuild the HTTP client so the interceptor wraps a fresh instance.
            this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
        }
        this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
            this.consistencyLevel,
            this.queryCompatibilityMode,
            this.userAgentContainer,
            this.globalEndpointManager,
            this.reactorHttpClient,
            this.apiType);
        this.globalEndpointManager.init();
        // Requires the endpoint manager to have fetched the database account.
        this.initializeGatewayConfigurationReader();
        if (metadataCachesSnapshot != null) {
            // Warm-start the collection cache from a previously serialized snapshot.
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy,
                metadataCachesSnapshot.getCollectionInfoByNameCache(),
                metadataCachesSnapshot.getCollectionInfoByIdCache()
            );
        } else {
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy);
        }
        this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
        this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
            collectionCache);
        // Push the freshly-built caches into the gateway store model.
        updateGatewayProxy();
        clientTelemetry = new ClientTelemetry(
            this,
            null,
            UUID.randomUUID().toString(),
            ManagementFactory.getRuntimeMXBean().getName(),
            userAgentContainer.getUserAgent(),
            connectionPolicy.getConnectionMode(),
            globalEndpointManager.getLatestDatabaseAccount().getId(),
            null,
            null,
            this.configs,
            this.clientTelemetryConfig,
            this,
            this.connectionPolicy.getPreferredRegions());
        clientTelemetry.init();
        if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
            this.storeModel = this.gatewayProxy;
        } else {
            this.initializeDirectConnectivity();
        }
        this.retryPolicy.setRxCollectionCache(this.collectionCache);
    } catch (Exception e) {
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
/**
 * Snapshots this client's collection cache into {@code state} so a future
 * client can be warm-started via {@link #init} without refetching metadata.
 */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Builds the direct-mode (TCP) transport stack: the global address resolver,
 * the store client factory, and finally the server store model.
 * Called from {@link #init} only when the connection mode is not GATEWAY.
 */
private void initializeDirectConnectivity() {
    this.addressResolver = new GlobalAddressResolver(this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        null,
        this.connectionPolicy,
        this.apiType);
    this.storeClientFactory = new StoreClientFactory(
        this.addressResolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled,
        this.clientTelemetry,
        this.globalEndpointManager
    );
    this.createStoreModel(true);
}
/**
 * Adapts this client to {@link DatabaseAccountManagerInternal} for the
 * {@code GlobalEndpointManager}; each call returns a new delegating adapter.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }
        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
/**
 * Factory for the gateway store model; package-private so tests can override
 * it and inject a stub transport.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient,
                                         ApiType apiType) {
    return new RxGatewayStoreModel(
        this,
        sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient,
        apiType);
}
/**
 * Builds the gateway HTTP client from the connection policy. When connection
 * sharing across clients is enabled, a process-shared instance is returned;
 * otherwise a dedicated fixed client is created and its config recorded in
 * the diagnostics configuration.
 */
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
    if (this.connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, this.diagnosticsClientConfig);
    }
    this.diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
    return HttpClient.createFixed(httpClientConfig);
}
/**
 * Creates the direct-mode store client and wraps it in a {@code ServerStoreModel}.
 *
 * @param subscribeRntbdStatus currently unused in this body — presumably a
 *        leftover/forward-compat flag; TODO confirm before removing.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        this.useMultipleWriteLocations
    );
    this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the account endpoint this client was created with. */
@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}
/**
 * Returns the first available write endpoint, or {@code null} when none is known.
 */
@Override
public URI getWriteEndpoint() {
    List<URI> writeEndpoints = globalEndpointManager.getWriteEndpoints();
    return writeEndpoints.isEmpty() ? null : writeEndpoints.get(0);
}
/**
 * Returns the first available read endpoint, or {@code null} when none is known.
 */
@Override
public URI getReadEndpoint() {
    List<URI> readEndpoints = globalEndpointManager.getReadEndpoints();
    return readEndpoints.isEmpty() ? null : readEndpoints.get(0);
}
/** Returns the effective connection policy (never null after construction). */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
/** Whether write operations return the resource payload in the response. */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return contentResponseOnWriteEnabled;
}
/** Returns the consistency level this client was configured with. */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return consistencyLevel;
}
/** Returns the telemetry component; populated during {@link #init}. */
@Override
public ClientTelemetry getClientTelemetry() {
    return this.clientTelemetry;
}
/**
 * Creates a database resource, retried per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Builds and issues the Create-Database request, recording serialization
 * timing into the request's diagnostics. Argument failures surface as an
 * error Mono rather than a thrown exception.
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
        // Time the payload serialization for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a database resource, retried per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Builds and issues the Delete-Database request. Argument failures surface
 * as an error Mono rather than a thrown exception.
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a database resource, retried per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Builds and issues the Read-Database request. Argument failures surface
 * as an error Mono rather than a thrown exception.
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the database feed of the account as paged responses.
 */
@Override
public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
    return this.readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link to the feed/query link for children of the
 * given resource type (e.g. a database link to its collections path).
 *
 * @throws IllegalArgumentException for resource types without a child feed.
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    final String queryLink;
    switch (resourceTypeEnum) {
        case Database:
            queryLink = Paths.DATABASES_ROOT;
            break;
        case DocumentCollection:
            queryLink = Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
            break;
        case Document:
            queryLink = Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
            break;
        case Offer:
            queryLink = Paths.OFFERS_ROOT;
            break;
        case User:
            queryLink = Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
            break;
        case ClientEncryptionKey:
            queryLink = Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
            break;
        case Permission:
            queryLink = Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
            break;
        case Attachment:
            queryLink = Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
            break;
        case StoredProcedure:
            queryLink = Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
            break;
        case Trigger:
            queryLink = Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
            break;
        case UserDefinedFunction:
            queryLink = Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
            break;
        case Conflict:
            queryLink = Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return queryLink;
}
/**
 * Extracts the operation context/listener pair from query options; null-safe.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .getOperationContext(options);
}
/**
 * Extracts the operation context/listener pair from request options; null-safe.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Entry point for feed queries: resolves the child feed link, picks the
 * correlation activity id (caller-supplied or random), and runs the query
 * under an invalid-partition retry policy.
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    // Caller-provided correlation id wins; otherwise generate one.
    UUID correlationActivityIdOfRequestOptions = ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .getCorrelationActivityId(options);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : Utils.randomUUID();
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this,
        getOperationContextAndListenerTuple(options));
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options));
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> createQueryInternal(
            resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, correlationActivityId),
        invalidPartitionExceptionRetryPolicy);
}
/**
 * Builds the query execution context and maps its responses, attaching query
 * info (for SELECT VALUE projections) and, on the first page only, the query
 * plan diagnostics.
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId) {
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache);
    // Ensures query-plan diagnostics are attached to the first page only.
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        // Only pipelined contexts carry QueryInfo.
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }
        QueryInfo finalQueryInfo = queryInfo;
        return iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
    });
}
/**
 * Queries databases from a raw query string; delegates to the SqlQuerySpec overload.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDatabases(querySpec, options);
}
/**
 * Queries databases with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return this.createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
/**
 * Creates a collection in the given database, retried per the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Builds and issues the Create-Collection request, recording serialization
 * timing into the diagnostics, and captures the session token from the
 * response so subsequent session reads see the new collection.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                            DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Time the payload serialization for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Record the session token for the freshly created collection.
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a collection's definition, retried per the session-token-reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Replaces (PUT) an existing DocumentCollection.
 * Serializes the collection body, records the serialization time into the request's
 * diagnostics, sends the request, and refreshes the session token from the response.
 * @throws IllegalArgumentException (as a Mono error) when collection is null
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the body serialization so it can be reported in request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        // Diagnostics context only exists after the request is created; attach timings now.
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Guard: the server may return no body (e.g. minimal-content responses).
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    // One retry policy per operation; the lambda lets the wrapper replay the delete.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes a DocumentCollection identified by its link.
 * @throws IllegalArgumentException (as a Mono error) when collectionLink is empty
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        final String resourcePath = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Sends a DELETE; headers are populated lazily when the returned Mono is subscribed. */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            // Close the retry window once at least one retry has happened.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/** Deletes all items for a partition key; the service models this as a POST. */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            final RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            // Close the retry window once at least one retry has happened.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/** Sends a point-read (GET); headers are populated on subscription. */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            // Close the retry window once at least one retry has happened.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/** Feed reads use GET and go straight to the store proxy once headers are populated. */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> getStoreProxy(populatedRequest).processMessage(populatedRequest));
}
/** Executes a query (POST) and captures the session token from every response. */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> getStoreProxy(populatedRequest)
            .processMessage(populatedRequest)
            .map(response -> {
                this.captureSessionToken(populatedRequest, response);
                return response;
            }));
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    // One retry policy per operation; the lambda lets the wrapper replay the read.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a DocumentCollection identified by its link.
 * @throws IllegalArgumentException (as a Mono error) when collectionLink is empty
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        final String resourcePath = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) {
    // A database link is mandatory; the collections feed path hangs off it.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedPath);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               CosmosQueryRequestOptions options) {
    // Wrap the raw SQL text in a SqlQuerySpec and delegate to the shared query pipeline.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Queries collections within a database using a parameterized SqlQuerySpec; delegates to
// the shared createQuery pipeline.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array literal, e.g. ["a",1,{...}].
 * JsonSerializable values use the model bridge; everything else goes through the shared mapper.
 * @throws IllegalArgumentException when a value cannot be serialized to JSON
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    final int count = objectArray.size();
    final String[] serialized = new String[count];
    for (int i = 0; i < count; ++i) {
        final Object param = objectArray.get(i);
        if (param instanceof JsonSerializable) {
            serialized[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                serialized[i] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the HTTP request headers from client-wide defaults plus per-request RequestOptions.
 * Client-level settings (tentative writes, default consistency, content-response-on-write)
 * are applied first; per-request options then override or extend them. Header values put
 * later in this method win over custom headers supplied via options.getHeaders().
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only the client-wide "return minimal" preference can apply.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Custom headers go in first so the well-known options below take precedence on conflict.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request contentResponseOnWriteEnabled overrides the client default when set.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if(options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Trigger lists are sent as comma-joined header values.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Offer (throughput) headers: an explicit non-negative offer throughput wins over offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties are only consulted when no explicit offer throughput was supplied.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed (manual) throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                    (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                        autoscaleAutoUpgradeProperties != null &&
                            autoscaleAutoUpgradeProperties
                                .getAutoscaleThroughputProperties()
                                .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    if (options.getDedicatedGatewayRequestOptions() != null &&
            options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
        headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
            String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
    }
    return headers;
}
/**
 * Exposes the factory used to create per-request retry policies.
 * @return the reset-session-token retry policy factory held by this client
 */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
/**
 * Resolves the target collection for the request and stamps the partition key derived from
 * the Document body / options onto it.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    // The collection's partition-key definition drives the extraction, so resolve it first.
    return this.collectionCache
        .resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request)
        .map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
}
/**
 * Once the supplied collection resolution completes, stamps the partition key onto the
 * request and passes the same request through.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
/**
 * Computes the effective partition key for the request and stamps it both on the request
 * object and as the partition-key header.
 * Precedence: explicit PartitionKey.NONE in options > explicit key in options >
 * empty key when the collection has no partition-key paths > key extracted from the
 * document body.
 * @throws UnsupportedOperationException when no partition key can be determined
 * @throws IllegalStateException when both the serialized content and the document object are null
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        // Caller explicitly asked for the "none" partition key.
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection without a partition-key definition: use the empty key.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Extract the key from the document body, preferring an already-typed node.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // Rewind because the buffer may already have been read by serialization.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // Time the extraction so it shows up in serialization diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Extracts the partition key value(s) for {@code document} per the collection's
 * partition-key definition.
 * HASH: reads the single configured path; a missing value (or an ObjectNode placeholder)
 * is mapped to the "none" partition key.
 * MULTI_HASH: reads every configured path into a composite key; missing values are kept
 * as nulls in the array.
 * @return the extracted PartitionKeyInternal, or null when partitionKeyDefinition is null
 * @throws IllegalArgumentException for an unrecognized partition-key kind
 */
public static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    InternalObjectNode document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition != null) {
        switch (partitionKeyDefinition.getKind()) {
            case HASH:
                // HASH uses exactly one path (the first configured one).
                String path = partitionKeyDefinition.getPaths().iterator().next();
                List<String> parts = PathParser.getPathParts(path);
                if (parts.size() >= 1) {
                    Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts);
                    if (value == null || value.getClass() == ObjectNode.class) {
                        // Absent or non-scalar value means "none" partition key.
                        value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
                    }
                    if (value instanceof PartitionKeyInternal) {
                        return (PartitionKeyInternal) value;
                    } else {
                        return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
                    }
                }
                break;
            case MULTI_HASH:
                // Sub-partitioning: one value per configured path, in path order.
                Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()];
                for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){
                    String partitionPath = partitionKeyDefinition.getPaths().get(pathIter);
                    List<String> partitionPathParts = PathParser.getPathParts(partitionPath);
                    partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts);
                }
                return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false);
            default:
                throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind());
        }
    }
    return null;
}
/**
 * Builds the service request for a document Create/Upsert: serializes the document body,
 * records serialization diagnostics, and resolves the collection so the partition key can be
 * stamped onto the request.
 * NOTE(review): disableAutomaticIdGeneration is not read in this method body — confirm
 * whether it is consumed elsewhere or is vestigial.
 * @throws IllegalArgumentException when documentCollectionLink is empty or document is null
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time the body serialization for diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    // Attach the serialization timings to the request's diagnostics context, if present.
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Resolve the collection (possibly cached) and stamp the partition key on the request.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch: wraps the pre-serialized batch body,
 * records serialization diagnostics, resolves the collection, and adds batch routing headers.
 * NOTE(review): disableAutomaticIdGeneration is not read in this method body — confirm
 * whether it is consumed elsewhere or is vestigial.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // The batch body is already serialized; only the byte-wrapping is timed here.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    // Attach the serialization timings to the request's diagnostics context, if present.
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Resolve the collection so the batch routing headers (PK / PK range) can be computed.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Adds batch routing and batch-wide headers to the request: either a partition key value or
 * a partition key range id, plus the is-batch / atomic / continue-on-error flags.
 * @throws UnsupportedOperationException for unknown ServerBatchRequest subtypes
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if (serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        // Single-PK batch: resolve the key value (mapping NONE to the collection's
        // none-partition-key form) and stamp it on the request and header.
        final PartitionKey pkValue =
            ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        final PartitionKeyInternal pkInternal;
        if (pkValue.equals(PartitionKey.NONE)) {
            pkInternal = ModelBridgeInternal.getNonePartitionKey(collection.getPartitionKey());
        } else {
            pkInternal = BridgeInternal.getPartitionKeyInternal(pkValue);
        }
        request.setPartitionKeyInternal(pkInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(pkInternal.toJson()));
    } else if (serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        // Range-scoped batch: route by partition key range id instead.
        final String rangeId = ((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId();
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(rangeId));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    // Batch-wide flags.
    final Map<String, String> headers = request.getHeaders();
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    headers.put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/*
 * NOTE(review): the documentation below describes populateHeadersAsync (defined elsewhere
 * in this class), NOT the method that immediately follows; it appears to have been orphaned
 * by a refactor and should be moved next to populateHeadersAsync. Kept here (demoted from
 * Javadoc to a plain comment so it does not attach to the wrong method):
 *
 * Caller needs to consume populateHeadersAsync by subscribing to the returned Mono in order
 * for the request to populate headers.
 *   request    - request to populate headers to
 *   httpMethod - http method
 *   returns a Mono which, on subscription, populates the headers in the request passed in.
 */
/**
 * Decides whether the request must be scoped by a feed range: only Document/Conflict
 * feed-style operations (ReadFeed/Query/SqlQuery) that actually carry a feed range qualify.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    final ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }
    switch (request.getOperationType()) {
        case ReadFeed:
        case Query:
        case SqlQuery:
            return request.getFeedRange() != null;
        default:
            return false;
    }
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    // Only AAD-token auth needs an async token fetch here; other auth modes pass through.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    // Only AAD-token auth needs an async token fetch here; other auth modes pass through.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/** Returns the authorization token type (e.g. master key, resource token, AAD) this client uses. */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
/**
 * Computes the authorization token for a request, trying in order:
 * 1) a caller-supplied token resolver, 2) the key credential signer, 3) a raw resource
 * token (when the key string is itself a resource token), 4) the per-resource token map.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        // Custom resolver wins; pass an unmodifiable view of any caller properties.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        // Key credential: sign the request headers.
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // The configured "key" is actually a single resource token; use it verbatim.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Account-level reads fall back to the first token from the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Maps an internal ResourceType to its public CosmosResourceType, falling back to SYSTEM
 * when the service-serialized name has no public mapping.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    final CosmosResourceType resolved =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return resolved != null ? resolved : CosmosResourceType.SYSTEM;
}
/** Persists the session token from a response into the session container, keyed by the request. */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/** Sends a create (POST); headers are populated lazily on subscription. */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            final RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            // Close the retry window once at least one retry has happened.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Sends an upsert: a POST with the IS_UPSERT header set. The session token from the
 * response is captured before the response is emitted.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> headers = populatedRequest.getHeaders();
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            // Close the retry window once at least one retry has happened.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(response -> {
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
/** Sends a replace (PUT); headers are populated lazily on subscription. */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            // Close the retry window once at least one retry has happened.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/** Sends a partial-document update (PATCH); headers are populated lazily on subscription. */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            // Close the retry window once at least one retry has happened.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // No explicit partition key: wrap with a policy that retries on partition-key-mismatch
    // errors (the key will be derived from the document body).
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectiveRetryPolicy),
        effectiveRetryPolicy);
}
/** Builds the create-document request and sends it, mapping the raw response to a typed one. */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
                                                                RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Create)
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // No explicit partition key: wrap with a policy that retries on partition-key-mismatch
    // errors (the key will be derived from the document body).
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectiveRetryPolicy),
        effectiveRetryPolicy);
}
/**
 * Builds the upsert service request and executes it, mapping the raw response to a
 * typed one. Synchronous failures surface as an error Mono.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Upsert);
        Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces the document at {@code documentLink} with the supplied object.
 * Without an explicit partition key, a mismatch-retry wrapper is added using the
 * collection name derived from the document link.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        policy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, policy, Utils.getCollectionName(documentLink), options);
    }
    final DocumentClientRetryPolicy effectivePolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, effectivePolicy), effectivePolicy);
}
/**
 * Validates the inputs, converts the raw object into a {@link Document} and delegates
 * to the typed replace path.
 *
 * @return the replaced document, or an error Mono on invalid input / conversion failure
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the throwable itself so the stack trace is logged, matching the style
        // of createDocumentInternal/upsertDocumentInternal.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document in place using its own self link.
 * Without an explicit partition key, a mismatch-retry wrapper is added.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        // NOTE(review): the document's self link is passed where a collection link is
        // expected — confirm PartitionKeyMismatchRetryPolicy tolerates a document link.
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, document.getSelfLink(), options);
    }
    final DocumentClientRetryPolicy effectivePolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, effectivePolicy), effectivePolicy);
}
/**
 * Validates the document and delegates to the typed replace path using the document's
 * own self link.
 *
 * @return the replaced document, or an error Mono on invalid input
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fixed copy/paste in the log message ("database" -> "document") and log the
        // throwable itself so the stack trace is preserved.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core replace path: serializes the document (recording serialization diagnostics),
 * builds a Replace request, resolves the collection and partition key, then executes.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
                                                                 Document document,
                                                                 RequestOptions options,
                                                                 DocumentClientRetryPolicy retryPolicyInstance) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
    // Time the serialization so it can be attached to the request diagnostics below.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTime,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
    // NOTE(review): the lambda captures the outer 'request' rather than using 'req';
    // this appears to assume addPartitionKeyInformation mutates and returns the same
    // instance — confirm before relying on it.
    return requestObs.flatMap(req -> replace(request, retryPolicyInstance)
        .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Applies a set of partial-document (patch) operations to the document at
 * {@code documentLink}, retrying on stale session tokens.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, retryPolicy),
        retryPolicy);
}
/**
 * Core patch path: serializes the patch operations (recording serialization
 * diagnostics), builds a Patch request, resolves the collection and partition key,
 * then executes and maps the response.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink,
                                                               CosmosPatchOperations cosmosPatchOperations,
                                                               RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    // Time the serialization so it can be attached to the request diagnostics below.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTime,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    // Patch carries no document body for PK extraction, hence the null content/object.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);
    // NOTE(review): captures the outer 'request' rather than 'req' — presumed to be
    // the same mutated instance; confirm.
    return requestObs.flatMap(req -> patch(request, retryPolicyInstance)
        .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Deletes the document at {@code documentLink}; delegates to the overload without a
 * pre-resolved document payload.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, null, options, retryPolicy), retryPolicy);
}
/**
 * Deletes the document at {@code documentLink}, using the supplied object node (when
 * present) to resolve the partition key without an extra lookup.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, internalObjectNode, options, retryPolicy),
        retryPolicy);
}
/**
 * Core delete path: builds a Delete request, resolves the collection and partition key
 * (from {@code internalObjectNode} when supplied), then executes.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs);
        return requestObs.flatMap(req -> this
            .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        // Only the message is logged here (no stack trace), unlike the create path.
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Deletes every document in the logical partition of the given collection.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    // NOTE(review): the partitionKey argument is not forwarded — the internal method
    // appears to rely on the partition key carried in 'options'; confirm callers
    // always set it there.
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Core bulk-delete-by-partition-key path: issues a Delete against the PartitionKey
 * resource of the collection. The partition key is taken from {@code options} via
 * addPartitionKeyInformation.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads the document at {@code documentLink}, retrying on stale session tokens.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy), retryPolicy);
}
/**
 * Core point-read path: builds a Read request, resolves the collection and partition
 * key, then executes and maps the response.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        // NOTE(review): reads the outer 'request' rather than 'req' — presumed to be
        // the same mutated instance; confirm.
        return requestObs.flatMap(req -> {
            return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads all documents of a collection, implemented as an unfiltered SELECT over it.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, CosmosQueryRequestOptions options, Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String selectAll = "SELECT * FROM r";
    return queryDocuments(collectionLink, selectAll, options, classOfT);
}
/**
 * Point-reads a batch of items in one round per partition-key range:
 * resolves the collection and its routing map, groups the requested identities by the
 * range owning their effective partition key, issues one query per range, then merges
 * all pages into a single FeedResponse (aggregating request charge).
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    CosmosQueryRequestOptions options,
    Class<T> klass) {
    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                throw new IllegalStateException("Collection cannot be null");
            }
            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);
            return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap =
                    new HashMap<>();
                CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                if (routingMap == null) {
                    throw new IllegalStateException("Failed to get routing map.");
                }
                // Bucket each requested identity under the range that owns its
                // effective partition key.
                itemIdentityList
                    .forEach(itemIdentity -> {
                        String effectivePartitionKeyString = PartitionKeyInternalHelper
                            .getEffectivePartitionKeyString(
                                BridgeInternal.getPartitionKeyInternal(
                                    itemIdentity.getPartitionKey()),
                                pkDefinition);
                        PartitionKeyRange range =
                            routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                        if (partitionRangeItemKeyMap.get(range) == null) {
                            List<CosmosItemIdentity> list = new ArrayList<>();
                            list.add(itemIdentity);
                            partitionRangeItemKeyMap.put(range, list);
                        } else {
                            List<CosmosItemIdentity> pairs =
                                partitionRangeItemKeyMap.get(range);
                            pairs.add(itemIdentity);
                            partitionRangeItemKeyMap.put(range, pairs);
                        }
                    });
                // One SQL spec per range, fetching exactly that range's identities.
                Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
                rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
                    collection.getPartitionKey());
                return createReadManyQuery(
                    resourceLink,
                    new SqlQuerySpec(DUMMY_SQL_QUERY),
                    options,
                    Document.class,
                    ResourceType.Document,
                    collection,
                    Collections.unmodifiableMap(rangeQueryMap))
                    .collectList()
                    .map(feedList -> {
                        // Merge every page: collect results, sum request charges and
                        // aggregate per-partition query metrics.
                        List<T> finalList = new ArrayList<>();
                        HashMap<String, String> headers = new HashMap<>();
                        ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                        double requestCharge = 0;
                        for (FeedResponse<Document> page : feedList) {
                            ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                ModelBridgeInternal.queryMetrics(page);
                            if (pageQueryMetrics != null) {
                                pageQueryMetrics.forEach(
                                    aggregatedQueryMetrics::putIfAbsent);
                            }
                            requestCharge += page.getRequestCharge();
                            finalList.addAll(page.getResults().stream().map(document ->
                                ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                        }
                        headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                            .toString(requestCharge));
                        FeedResponse<T> frp = BridgeInternal
                            .createFeedResponse(finalList, headers);
                        return frp;
                    });
            });
        }
    );
}
/**
 * Builds, for each partition-key range, the SQL spec that fetches its batch of items.
 * When the partition key path is exactly "/id" the id doubles as the partition key
 * and a simpler IN-clause query is used.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final String pkSelector = createPkSelector(partitionKeyDefinition);
    final boolean pkIsId = "[\"id\"]".equals(pkSelector);
    Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, identities) -> {
        SqlQuerySpec spec = pkIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector)
            : createReadManyQuerySpec(identities, pkSelector);
        queriesByRange.put(range, spec);
    });
    return queriesByRange;
}
/**
 * Builds "SELECT * FROM c WHERE c.id IN ( @param0, ... )" for the case where the
 * partition key path is "/id" (id and partition-key value coincide). Identities whose
 * id does not equal their partition-key value are skipped.
 *
 * <p>Fix: the previous version appended the separator eagerly, so a skipped trailing
 * item left a dangling comma ("IN ( @param0,  )"), producing invalid SQL. Parameter
 * names are now collected first and joined.
 *
 * @param partitionKeySelector unused here; kept for signature parity with
 *        {@link #createReadManyQuerySpec}
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    List<String> parameterNames = new ArrayList<>();
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);
        String idValue = itemIdentity.getId();
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        if (!Objects.equals(idValue, pkValue)) {
            // Only applicable when the id doubles as the partition key.
            continue;
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        parameterNames.add(idParamName);
    }
    String queryText = "SELECT * FROM c WHERE c.id IN ( "
        + String.join(", ", parameterNames)
        + " )";
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Builds the general readMany query:
 * "SELECT * FROM c WHERE ( (c.id = @p1 AND c[pk] = @p0) OR ... )", with one
 * (pk, id) parameter pair per identity.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    int index = 0;
    for (CosmosItemIdentity identity : itemIdentities) {
        // Even-numbered params hold partition-key values, odd-numbered the ids.
        String pkParamName = "@param" + (2 * index);
        String idParamName = "@param" + (2 * index + 1);
        parameters.add(new SqlParameter(pkParamName, ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        if (index > 0) {
            query.append(" OR ");
        }
        query.append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append((" = "))
            .append(pkParamName)
            .append(" )");
        index++;
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Converts the partition-key definition's paths (e.g. "/pk") into the bracketed
 * property selector used in generated SQL (e.g. ["pk"]); multiple paths concatenate.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        .map(pathPart -> StringUtils.substring(pathPart, 1)) // drop the leading '/'
        // NOTE(review): replaces '"' with a lone backslash rather than an escaped
        // quote (\") — looks suspicious for paths containing quotes; confirm intent.
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
/**
 * Creates and runs the per-range readMany execution contexts: one query per
 * partition-key range (from {@code rangeQueryMap}), all sharing a single activity id.
 *
 * @return the concatenation of every range's feed responses
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum);
    return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Convenience overload: wraps the raw query text in a parameterless SqlQuerySpec.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, CosmosQueryRequestOptions options, Class<T> classOfT) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, spec, options, classOfT);
}
/**
 * Adapts this client to the IDocumentQueryClient interface used by the query
 * pipeline, exposing its caches/retry policy and, when an operation listener is
 * supplied, notifying it around each query execution.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                // Tag the request with the correlation id and notify the listener on
                // request, response and error.
                final OperationListener listener =
                    operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);
                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }
        // Not supported by this adapter; the query pipeline does not use it here.
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            return null;
        }
    };
}
/**
 * Executes a parameterized SQL query against the collection's documents; the query is
 * logged (per the logger's configuration) before execution.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options,
    Class<T> classOfT) {
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, options, classOfT, ResourceType.Document);
}
/**
 * Queries the change feed of a collection; pagination and continuation handling are
 * delegated to ChangeFeedQueryImpl.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    return new ChangeFeedQueryImpl<T>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions)
        .executeAsync();
}
/**
 * Reads every document of a single logical partition: resolves the collection, builds
 * a logical-partition scan query, locates the single partition-key range owning the
 * key via the routing map, and runs the query pinned to that range. Collection-cache
 * staleness is handled by InvalidPartitionExceptionRetryPolicy.
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    CosmosQueryRequestOptions options,
    Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );
    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            throw new IllegalStateException("Collection cannot be null");
        }
        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        // Scan query filtered to the given logical partition.
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = Utils.randomUUID();
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
        final CosmosQueryRequestOptions effectiveOptions =
            ModelBridgeInternal.createQueryRequestOptions(options);
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
        return ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        throw new IllegalStateException("Failed to get routing map.");
                    }
                    // A logical partition maps to exactly one physical range; pin the
                    // query to it.
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);
                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                    return createQueryInternal(
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT,
                        ResourceType.Document,
                        queryClient,
                        activityId);
                });
            },
            invalidPartitionExceptionRetryPolicy);
    });
}
/**
 * Exposes the shared query-plan cache (query text to partitioned execution info) so
 * the query pipeline can reuse previously fetched plans.
 */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}
/**
 * Reads the partition-key ranges of a collection, exposed as a feed under the
 * collection's pkranges path.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Validates the inputs and builds a StoredProcedure-typed service request under the
 * collection's stored-procedures path.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
/**
 * Validates the inputs and builds a UserDefinedFunction-typed service request under
 * the collection's UDF path.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, path, udf, headers, options);
}
/**
 * Creates a stored procedure in the given collection, retrying on stale session tokens.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy), retryPolicy);
}
/**
 * Builds and executes the create-stored-procedure request, mapping the raw response
 * to a typed one. Synchronous failures surface as an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a stored procedure in the given collection, retrying on stale session tokens.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy), retryPolicy);
}
/**
 * Builds and executes the upsert-stored-procedure request, mapping the raw response
 * to a typed one. Synchronous failures surface as an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing stored procedure (addressed via its self-link),
 * retrying per the session-token-reset retry policy.
 *
 * @param storedProcedure the replacement payload (must carry a self-link)
 * @param options         request-level options (may be null)
 * @return a Mono emitting the service response or an error
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Validates the payload, builds the Replace request against the stored
 * procedure's self-link, and maps the response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when {@code storedProcedure} is null
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
RxDocumentClientImpl.validateResource(storedProcedure);
String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the stored procedure addressed by {@code storedProcedureLink},
 * retrying per the session-token-reset retry policy.
 *
 * @param storedProcedureLink self-link of the stored procedure
 * @param options             request-level options (may be null)
 * @return a Mono emitting the delete response or an error
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Delete request for a stored procedure and maps the raw
 * service response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when the link is empty
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the stored procedure addressed by {@code storedProcedureLink},
 * retrying per the session-token-reset retry policy.
 *
 * @param storedProcedureLink self-link of the stored procedure
 * @param options             request-level options (may be null)
 * @return a Mono emitting the read response or an error
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Read request for a stored procedure and maps the raw
 * service response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when the link is empty
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the stored-procedure feed of the given collection.
 *
 * @param collectionLink link of the owning collection (must be non-empty)
 * @param options        query options (may be null)
 * @return a Flux of feed pages
 * @throws IllegalArgumentException when {@code collectionLink} is empty
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/**
 * Queries stored procedures in the given collection; delegates to the
 * {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, spec, options);
}
/**
 * Queries stored procedures in the given collection using a parameterized
 * query specification.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(
        collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with default request options; delegates to the
 * three-argument overload.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    List<Object> procedureParams) {
    final RequestOptions defaultOptions = null;
    return this.executeStoredProcedure(storedProcedureLink, defaultOptions, procedureParams);
}
/**
 * Executes a stored procedure with the given options and parameters,
 * retrying per the session-token-reset retry policy.
 *
 * @param storedProcedureLink self-link of the stored procedure
 * @param options             request-level options (may be null)
 * @param procedureParams     positional arguments passed to the script (may be null/empty)
 * @return a Mono emitting the script's response or an error
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch against the given collection, retrying per
 * the session-token-reset retry policy.
 *
 * @param collectionLink               link of the target collection
 * @param serverBatchRequest           the batch of operations to execute
 * @param options                      request-level options (may be null)
 * @param disableAutomaticIdGeneration whether to suppress automatic id generation
 * @return a Mono emitting the batch response or an error
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Builds the ExecuteJavaScript request (serializing the positional parameters
 * into the body), resolves partition-key information, sends the request, and
 * captures the session token from the response.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
// Empty body when there are no parameters; otherwise a JSON-serialized argument list.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda sends the outer 'request' rather than the mapped 'req' —
// presumably addPartitionKeyInformation mutates and returns the same instance; confirm.
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the batch document request, sends it as a Create, and parses the
 * service response into a {@code CosmosBatchResponse}.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
DocumentClientRetryPolicy requestRetryPolicy,
boolean disableAutomaticIdGeneration) {
try {
logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
} catch (Exception ex) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
return Mono.error(ex);
}
}
/**
 * Creates a trigger in the given collection, retrying per the
 * session-token-reset retry policy.
 *
 * @param collectionLink link of the owning collection
 * @param trigger        trigger payload
 * @param options        request-level options (may be null)
 * @return a Mono emitting the service response or an error
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Create request for a trigger and maps the raw service
 * response to a typed {@code ResourceResponse}.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a trigger in the given collection, retrying per the
 * session-token-reset retry policy.
 *
 * @param collectionLink link of the owning collection
 * @param trigger        trigger payload
 * @param options        request-level options (may be null)
 * @return a Mono emitting the service response or an error
 */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Upsert request for a trigger and maps the raw service
 * response to a typed {@code ResourceResponse}.
 */
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Validates the inputs and assembles a trigger request for the given
 * operation type.
 *
 * @param collectionLink link of the owning collection (must be non-empty)
 * @param trigger        trigger payload (must be non-null and valid)
 * @param options        request-level options (may be null)
 * @param operationType  the operation this request performs
 * @return the assembled service request
 * @throws IllegalArgumentException when the link is empty or the trigger is null
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);

    final String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Trigger, path, trigger, headers, options);
}
/**
 * Replaces an existing trigger (addressed via its self-link), retrying per
 * the session-token-reset retry policy.
 *
 * @param trigger replacement payload (must carry a self-link)
 * @param options request-level options (may be null)
 * @return a Mono emitting the service response or an error
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Validates the payload, builds the Replace request against the trigger's
 * self-link, and maps the response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when {@code trigger} is null
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the trigger addressed by {@code triggerLink}, retrying per the
 * session-token-reset retry policy.
 *
 * @param triggerLink self-link of the trigger
 * @param options     request-level options (may be null)
 * @return a Mono emitting the delete response or an error
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Delete request for a trigger and maps the raw service
 * response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when the link is empty
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the trigger addressed by {@code triggerLink}, retrying per the
 * session-token-reset retry policy.
 *
 * @param triggerLink self-link of the trigger
 * @param options     request-level options (may be null)
 * @return a Mono emitting the read response or an error
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Read request for a trigger and maps the raw service
 * response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when the link is empty
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the trigger feed of the given collection.
 *
 * @param collectionLink link of the owning collection (must be non-empty)
 * @param options        query options (may be null)
 * @return a Flux of feed pages
 * @throws IllegalArgumentException when {@code collectionLink} is empty
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedPath);
}
/**
 * Queries triggers in the given collection; delegates to the
 * {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, options);
}
/**
 * Queries triggers in the given collection using a parameterized query
 * specification.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function in the given collection, retrying per the
 * session-token-reset retry policy.
 *
 * @param collectionLink link of the owning collection
 * @param udf            UDF payload
 * @param options        request-level options (may be null)
 * @return a Mono emitting the service response or an error
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Create request for a user-defined function and maps
 * the raw service response to a typed {@code ResourceResponse}.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a user-defined function in the given collection, retrying per the
 * session-token-reset retry policy.
 *
 * @param collectionLink link of the owning collection
 * @param udf            UDF payload
 * @param options        request-level options (may be null)
 * @return a Mono emitting the service response or an error
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Upsert request for a user-defined function and maps
 * the raw service response to a typed {@code ResourceResponse}.
 */
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces an existing user-defined function (addressed via its self-link),
 * retrying per the session-token-reset retry policy.
 *
 * @param udf     replacement payload (must carry a self-link)
 * @param options request-level options (may be null)
 * @return a Mono emitting the service response or an error
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Validates the payload, builds the Replace request against the UDF's
 * self-link, and maps the response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when {@code udf} is null
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the user-defined function addressed by {@code udfLink}, retrying
 * per the session-token-reset retry policy.
 *
 * @param udfLink self-link of the UDF
 * @param options request-level options (may be null)
 * @return a Mono emitting the delete response or an error
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Delete request for a user-defined function and maps
 * the raw service response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when the link is empty
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user-defined function addressed by {@code udfLink}, retrying per
 * the session-token-reset retry policy.
 *
 * @param udfLink self-link of the UDF
 * @param options request-level options (may be null)
 * @return a Mono emitting the read response or an error
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Read request for a user-defined function and maps the
 * raw service response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when the link is empty
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user-defined-function feed of the given collection.
 *
 * @param collectionLink link of the owning collection (must be non-empty)
 * @param options        query options (may be null)
 * @return a Flux of feed pages
 * @throws IllegalArgumentException when {@code collectionLink} is empty
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
/**
 * Queries user-defined functions in the given collection; delegates to the
 * {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
    String query, CosmosQueryRequestOptions options) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, options);
}
/**
 * Queries user-defined functions in the given collection using a
 * parameterized query specification.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(
        collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/**
 * Reads the conflict addressed by {@code conflictLink}, retrying per the
 * session-token-reset retry policy.
 *
 * @param conflictLink self-link of the conflict
 * @param options      request-level options (may be null)
 * @return a Mono emitting the read response or an error
 */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds the Read request for a conflict, resolves partition-key information,
 * and maps the raw service response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when the link is empty
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda uses the outer 'request', not the mapped 'req' —
// presumably addPartitionKeyInformation mutates and returns the same instance; confirm.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the conflict feed of the given collection.
 *
 * @param collectionLink link of the owning collection (must be non-empty)
 * @param options        query options (may be null)
 * @return a Flux of feed pages
 * @throws IllegalArgumentException when {@code collectionLink} is empty
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Conflict, Conflict.class, feedPath);
}
/**
 * Queries conflicts in the given collection; delegates to the
 * {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, options);
}
/**
 * Queries conflicts in the given collection using a parameterized query
 * specification.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
/**
 * Deletes the conflict addressed by {@code conflictLink}, retrying per the
 * session-token-reset retry policy.
 *
 * @param conflictLink self-link of the conflict
 * @param options      request-level options (may be null)
 * @return a Mono emitting the delete response or an error
 */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds the Delete request for a conflict, resolves partition-key
 * information, and maps the raw service response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when the link is empty
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda uses the outer 'request', not the mapped 'req' —
// presumably addPartitionKeyInformation mutates and returns the same instance; confirm.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Creates a user in the given database, retrying per the
 * session-token-reset retry policy.
 *
 * @param databaseLink link of the owning database
 * @param user         user payload
 * @param options      request-level options (may be null)
 * @return a Mono emitting the service response or an error
 */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Create request for a user and maps the raw service
 * response to a typed {@code ResourceResponse}.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a user in the given database, retrying per the
 * session-token-reset retry policy.
 *
 * @param databaseLink link of the owning database
 * @param user         user payload
 * @param options      request-level options (may be null)
 * @return a Mono emitting the service response or an error
 */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Upsert request for a user and maps the raw service
 * response to a typed {@code ResourceResponse}.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Validates the inputs and assembles a user request for the given operation
 * type.
 *
 * @param databaseLink  link of the owning database (must be non-empty)
 * @param user          user payload (must be non-null and valid)
 * @param options       request-level options (may be null)
 * @param operationType the operation this request performs
 * @return the assembled service request
 * @throws IllegalArgumentException when the link is empty or the user is null
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    final String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, path, user, headers, options);
}
/**
 * Replaces an existing user (addressed via its self-link), retrying per the
 * session-token-reset retry policy.
 *
 * @param user    replacement payload (must carry a self-link)
 * @param options request-level options (may be null)
 * @return a Mono emitting the service response or an error
 */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/**
 * Validates the payload, builds the Replace request against the user's
 * self-link, and maps the response to a typed {@code ResourceResponse}.
 *
 * @throws IllegalArgumentException (as an error Mono) when {@code user} is null
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
// Synchronous failures surface as an error Mono.
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a {@link User} resource identified by its link, wiring the session-token-reset
 * retry policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of user delete: builds a Delete request addressed by the user link
 * and maps the response to {@code ResourceResponse<User>}. Synchronous failures are
 * surfaced as an error Mono.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads a {@link User} resource identified by its link, wiring the session-token-reset
 * retry policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of user read: builds a Read request addressed by the user link
 * and maps the response to {@code ResourceResponse<User>}. Synchronous failures are
 * surfaced as an error Mono.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user feed of the given database as pages of {@link User}.
 *
 * @throws IllegalArgumentException if {@code databaseLink} is empty.
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String usersFeedLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.User, User.class, usersFeedLink);
}
/**
 * Queries users of a database; convenience overload that wraps the raw query text in a
 * {@link SqlQuerySpec} and delegates to the spec-based overload.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, options);
}
/**
 * Queries users of a database with a parameterized query spec; delegates to the shared
 * query pipeline via {@code createQuery}.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/**
 * Reads a {@link ClientEncryptionKey} identified by its link, wiring the session-token-reset
 * retry policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of client encryption key read: builds a Read request addressed by the
 * key link and maps the response to {@code ResourceResponse<ClientEncryptionKey>}.
 * Synchronous failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
throw new IllegalArgumentException("clientEncryptionKeyLink");
}
logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
String path = Utils.joinPath(clientEncryptionKeyLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Creates a {@link ClientEncryptionKey} under the given database, wiring the
 * session-token-reset retry policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
    ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of client encryption key create: builds the Create request via
 * {@code getClientEncryptionKeyRequest} and maps the response to
 * {@code ResourceResponse<ClientEncryptionKey>}. Synchronous failures are surfaced as an
 * error Mono. Argument validation happens inside the request builder.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request for a body-carrying operation on a ClientEncryptionKey
 * resource under the given database.
 *
 * @param databaseLink        link of the owning database; must be non-empty.
 * @param clientEncryptionKey the key resource to send; must not be null.
 * @param options             request options used to populate headers (may be null).
 * @param operationType       the operation the request is built for.
 * @return the populated {@link RxDocumentServiceRequest}.
 * @throws IllegalArgumentException if {@code databaseLink} is empty or the key is null.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
RxDocumentClientImpl.validateResource(clientEncryptionKey);
// Target the client-encryption-keys feed of the database.
String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
return request;
}
/**
 * Replaces a {@link ClientEncryptionKey} addressed by its name-based link, wiring the
 * session-token-reset retry policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
    String nameBasedLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of client encryption key replace: validates the resource, builds a
 * Replace request addressed by the name-based link, and maps the response to
 * {@code ResourceResponse<ClientEncryptionKey>}. Synchronous failures are surfaced as an
 * error Mono.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(nameBasedLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the client-encryption-key feed of the given database as pages of
 * {@link ClientEncryptionKey}.
 *
 * @throws IllegalArgumentException if {@code databaseLink} is empty.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String keysFeedLink = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, keysFeedLink);
}
/**
 * Queries client encryption keys of a database; convenience overload that wraps the raw
 * query text in a {@link SqlQuerySpec} and delegates to the spec-based overload.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query,
    CosmosQueryRequestOptions options) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryClientEncryptionKeys(databaseLink, querySpec, options);
}
/**
 * Queries client encryption keys of a database with a parameterized query spec; delegates
 * to the shared query pipeline via {@code createQuery}.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a {@link Permission} under the given user, wiring the session-token-reset retry
 * policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 * <p>
 * Fix: the retry policy driving {@code inlineIfPossibleAsObs} is now the SAME instance that
 * is handed to {@code createPermissionInternal}. The previous code called
 * {@code resetSessionTokenRetryPolicy.getRequestPolicy()} a second time, so retries were
 * governed by an unrelated policy instance that never saw the request via
 * {@code onBeforeSendRequest} — inconsistent with every sibling wrapper in this class.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Core implementation of permission create: builds the Create request via
 * {@code getPermissionRequest} and maps the response to {@code ResourceResponse<Permission>}.
 * Synchronous failures are surfaced as an error Mono. Argument validation happens inside
 * the request builder.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a {@link Permission} under the given user, wiring the session-token-reset retry
 * policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of permission upsert: builds the Upsert request via
 * {@code getPermissionRequest}, lets the retry policy observe it, and maps the response to
 * {@code ResourceResponse<Permission>}. Synchronous failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request for a body-carrying operation on a Permission resource under
 * the given user.
 *
 * @param userLink      link of the owning user; must be non-empty.
 * @param permission    the permission resource to send; must not be null.
 * @param options       request options used to populate headers (may be null).
 * @param operationType the operation the request is built for.
 * @return the populated {@link RxDocumentServiceRequest}.
 * @throws IllegalArgumentException if {@code userLink} is empty or {@code permission} is null.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
if (permission == null) {
throw new IllegalArgumentException("permission");
}
RxDocumentClientImpl.validateResource(permission);
// Target the permissions feed of the user.
String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.Permission, path, permission, requestHeaders, options);
return request;
}
/**
 * Replaces a {@link Permission} resource, wiring the session-token-reset retry policy
 * through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of permission replace: validates the resource, builds a Replace
 * request addressed by the permission's self link, and maps the response to
 * {@code ResourceResponse<Permission>}. Synchronous failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
// Self-link addressing; no extra path segment appended.
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a {@link Permission} identified by its link, wiring the session-token-reset
 * retry policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of permission delete: builds a Delete request addressed by the
 * permission link and maps the response to {@code ResourceResponse<Permission>}.
 * Synchronous failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads a {@link Permission} identified by its link, wiring the session-token-reset
 * retry policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of permission read: builds a Read request addressed by the
 * permission link and maps the response to {@code ResourceResponse<Permission>}.
 * Synchronous failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the permission feed of the given user as pages of {@link Permission}.
 *
 * @throws IllegalArgumentException if {@code userLink} is empty.
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    final String permissionsFeedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Permission, Permission.class, permissionsFeedLink);
}
/**
 * Queries permissions of a user; convenience overload that wraps the raw query text in a
 * {@link SqlQuerySpec} and delegates to the spec-based overload.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
    CosmosQueryRequestOptions options) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryPermissions(userLink, querySpec, options);
}
/**
 * Queries permissions of a user with a parameterized query spec; delegates to the shared
 * query pipeline via {@code createQuery}.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
/**
 * Replaces an {@link Offer} resource, wiring the session-token-reset retry policy through
 * {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of offer replace: validates the resource, builds a Replace request
 * addressed by the offer's self link (no request options/headers are used for offers here),
 * and maps the response to {@code ResourceResponse<Offer>}. Synchronous failures are
 * surfaced as an error Mono.
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads an {@link Offer} identified by its link, wiring the session-token-reset retry
 * policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of offer read: builds a Read request addressed by the offer link
 * (no headers; the cast disambiguates the overload) and maps the response to
 * {@code ResourceResponse<Offer>}. Synchronous failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
// Null headers; the cast selects the map-taking create(...) overload.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the account-level offers feed as pages of {@link Offer}.
 */
@Override
public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
    final String offersFeedLink = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return readFeed(options, ResourceType.Offer, Offer.class, offersFeedLink);
}
/**
 * Generic paged ReadFeed helper used by readUsers/readPermissions/readOffers/etc.
 * Builds a request factory that threads the continuation token and page size through
 * request headers, executes each page with a fresh retry wrapper, and hands both to the
 * {@link Paginator} to drive the page stream.
 *
 * @param options      query options; a default instance is used when null.
 * @param resourceType the feed's resource type.
 * @param klass        the item type each page is deserialized into.
 * @param resourceLink the feed link to read.
 * @return a Flux of feed pages of {@code T}.
 */
private <T> Flux<FeedResponse<T>> readFeed(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink) {
if (options == null) {
options = new CosmosQueryRequestOptions();
}
// -1 means "no explicit page size" downstream.
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
// Per-page request factory: continuation token + page size travel as headers.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
retryPolicy.onBeforeSendRequest(request);
return request;
};
// Per-page executor: read the feed and map the raw response into a typed page.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(
response,
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getItemFactoryMethod(finalCosmosQueryRequestOptions, klass),
klass)),
retryPolicy);
return Paginator.getPaginatedQueryResultAsObservable(
options, createRequestFunc, executeFunc, maxPageSize);
}
/**
 * Queries offers; convenience overload that wraps the raw query text in a
 * {@link SqlQuerySpec} and delegates to the spec-based overload.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryOffers(querySpec, options);
}
/**
 * Queries offers with a parameterized query spec; offers are account-level, so the parent
 * resource link is null.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/**
 * Reads the {@link DatabaseAccount} from the service, wiring the session-token-reset retry
 * policy through {@link ObservableHelper#inlineIfPossibleAsObs}.
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
/**
 * Returns the most recently cached {@link DatabaseAccount} from the global endpoint
 * manager without issuing a service call.
 */
@Override
public DatabaseAccount getLatestDatabaseAccount() {
return this.globalEndpointManager.getLatestDatabaseAccount();
}
/**
 * Core implementation of database account read: issues a Read against the account root
 * (empty path, no headers) and maps the raw response via {@code ModelBridgeInternal}.
 * Synchronous failures are surfaced as an error Mono.
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
// Null headers; the cast selects the map-taking create(...) overload.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Returns the client's session container (typed as Object at this boundary; see
 * {@code setSession}).
 */
public Object getSession() {
return this.sessionContainer;
}
/**
 * Replaces the client's session container.
 * NOTE(review): the argument is cast unchecked to {@link SessionContainer}; a wrong type
 * surfaces as a ClassCastException here.
 */
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
/** Returns the client's collection cache. */
@Override
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
/** Returns the client's partition key range cache. */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
/**
 * Reads the {@link DatabaseAccount} from a specific endpoint (bypassing endpoint
 * selection) via the gateway proxy. Failures are logged as warnings and propagated.
 * As a side effect, a successful read updates {@code useMultipleWriteLocations} from the
 * connection policy and the returned account.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
// Force the request to the supplied endpoint instead of the selected one.
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount ->
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 * Routes master/control-plane resources (offers, client encryption keys, partition key
 * ranges, non-execute scripts, partition key delete) and selected database/user/collection/
 * permission operations to the gateway proxy; document-plane work goes to the direct store
 * model. Cross-partition queries/read-feeds on collection children that carry neither a
 * partition key header nor a partition key range identity also go through the gateway.
 *
 * @param request the request being routed.
 * @return RxStoreModel the store model (gateway proxy or direct store) to dispatch to.
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// Explicit per-request override wins.
if (request.UseGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Master resources always go through gateway.
if (resourceType == ResourceType.Offer ||
resourceType == ResourceType.ClientEncryptionKey ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange ||
resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
return this.gatewayProxy;
}
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Queries/read-feeds without partition targeting info must be fanned out by gateway.
if ((operationType == OperationType.Query ||
operationType == OperationType.SqlQuery ||
operationType == OperationType.ReadFeed) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null &&
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
/**
 * Shuts the client down exactly once: releases the global endpoint manager, the store
 * client factory, the HTTP client, CPU monitoring registration, and (if enabled) the
 * throughput control store. Subsequent calls only log a warning.
 */
@Override
public void close() {
logger.info("Attempting to close client {}", this.clientId);
// getAndSet guarantees the shutdown sequence runs at most once.
if (!closed.getAndSet(true)) {
activeClientsCnt.decrementAndGet();
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
LifeCycleUtils.closeQuietly(this.reactorHttpClient);
logger.info("Shutting down CpuMonitor ...");
CpuMemoryMonitor.unregister(this);
if (this.throughputControlEnabled.get()) {
logger.info("Closing ThroughputControlStore ...");
this.throughputControlStore.close();
}
logger.info("Shutting down completed.");
} else {
logger.warn("Already shutdown!");
}
}
/** Returns the item deserializer used to materialize documents. */
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
/**
 * Registers a throughput control group. The backing {@link ThroughputControlStore} is
 * lazily created on the first call (guarded by compareAndSet plus method-level
 * synchronization) and hooked into the store model; every call then registers the group.
 *
 * @param group the group to enable; must not be null.
 */
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) {
checkNotNull(group, "Throughput control group can not be null");
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
this.storeModel.enableThroughputControl(throughputControlStore);
}
this.throughputControlStore.enableThroughputControlGroup(group);
}
/**
 * Warms up connections and caches for the given container by delegating to the store
 * model.
 *
 * @param containerLink the container link; must be non-empty.
 */
@Override
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(String containerLink) {
checkArgument(StringUtils.isNotEmpty(containerLink), "Argument 'containerLink' should not be null nor empty");
return this.storeModel.openConnectionsAndInitCaches(containerLink);
}
/**
 * Builds a parameterized query that scans a single logical partition:
 * {@code SELECT * FROM c WHERE c<selector> = @pkValue}, with the partition key value
 * bound as the {@code @pkValue} parameter.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    final String pkParamName = "@pkValue";
    final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    final List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    final String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Returns the feed ranges (one per physical partition) of the given collection, retrying
 * with an {@link InvalidPartitionExceptionRetryPolicy} so stale name-cache entries are
 * refreshed on InvalidPartitionException.
 *
 * @param collectionLink the collection link (validated in the internal method).
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
// The resource body is irrelevant here; the request only drives collection resolution.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink),
invalidPartitionExceptionRetryPolicy);
}
/**
 * Resolves the collection, fetches all partition key ranges overlapping the full key
 * space, and converts them into feed ranges.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty.
 * @throws IllegalStateException    if the collection resolves to null.
 */
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
// Fetch every range overlapping the entire partition key space (forceRefresh = true).
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
/**
 * Converts a list of partition key ranges into feed ranges. A null list means the cached
 * name mapping is stale: the request is flagged for a name-cache refresh and an
 * {@link InvalidPartitionException} is thrown so the caller's retry policy re-resolves.
 */
private static List<FeedRange> toFeedRanges(
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v;
if (partitionKeyRangeList == null) {
request.forceNameCacheRefresh = true;
throw new InvalidPartitionException();
}
List<FeedRange> feedRanges = new ArrayList<>();
partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange)));
return feedRanges;
}
/** Wraps a single partition key range's EPK range in a {@link FeedRangeEpkImpl}. */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// ---- Process-wide identity and bookkeeping (shared by all client instances) ----
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
// Number of currently open clients; incremented in the root constructor.
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
// Monotonic source for per-client ids.
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// EPK range [min, max) covering every physical partition of a container.
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// ---- Authentication / endpoint configuration (fixed at construction) ----
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
// ---- Mutable wiring populated during init() ----
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Resource tokens keyed by resource id/full name, built from a permission feed.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private ApiType apiType;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
// ---- Throughput control (opt-in) ----
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final ClientTelemetryConfig clientTelemetryConfig;
/**
 * Creates a client authenticated via master key / resource token / permission feed with an
 * optional {@link CosmosAuthorizationTokenResolver}. Pure delegation: forwards to the
 * overload that also accepts a {@code TokenCredential} (passing null for it), then installs
 * the resolver afterwards because the delegate chain does not accept one.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
ClientTelemetryConfig clientTelemetryConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Creates a client with an optional {@link CosmosAuthorizationTokenResolver} and an optional
 * AAD {@code TokenCredential}. Pure delegation to the private permission-feed constructor;
 * the resolver is installed afterwards because the delegate chain does not accept one.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
ClientTelemetryConfig clientTelemetryConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Delegates core construction to the root constructor, then — when a permission feed is
 * supplied — builds {@code resourceTokensMap}: resource id/full name -> list of
 * (partition key, resource token) pairs used for per-resource authorization.
 *
 * @throws IllegalArgumentException when a permission has an empty or unparseable resource
 *                                  link, or when the feed yields no usable tokens
 */
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
ClientTelemetryConfig clientTelemetryConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverrideEnabled,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
// Reject links with no path segments before attempting the full parse.
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
// Group tokens by resource id/full name; create the bucket on first sight.
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
// A permission without a partition key maps to PartitionKeyInternal.Empty.
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
// Remember the first real resource token as a fallback credential.
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
/**
 * Root constructor: assigns all immutable configuration, selects the authorization scheme
 * (key credential, resource token, master key, or AAD token), and creates the session
 * container, HTTP client, global endpoint manager and retry policy. Heavy wiring (caches,
 * store model, telemetry) happens later in {@code init(...)}. On any runtime failure the
 * partially-built client is closed before the exception is rethrown.
 */
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
ClientTelemetryConfig clientTelemetryConfig) {
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Authorization precedence: explicit key credential > resource token >
// master key string > AAD token credential.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
this.tokenCredentialScopes = new String[] {
// NOTE(review): this scope string literal appears truncated in this copy of the
// source — verify the full scope expression against the upstream file.
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
// Fall back to default direct-mode connection policy when none is supplied.
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
// Session capturing is only needed for SESSION consistency unless explicitly forced on.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
logger.error("unexpected failure in initializing client.", e);
// Release whatever was acquired before rethrowing.
close();
throw e;
}
}
/** Returns the diagnostics configuration captured for this client instance. */
@Override
public DiagnosticsClientConfig getConfig() {
    return this.diagnosticsClientConfig;
}
/** Creates a new diagnostics object bound to this client's context. */
@Override
public CosmosDiagnostics createDiagnostics() {
    CosmosDiagnostics diagnostics = BridgeInternal.createCosmosDiagnostics(this);
    return diagnostics;
}
// Reads the service configuration via the global endpoint manager and decides whether
// multi-region writes can be used. Fails fast when no database account could be fetched
// (unreachable endpoint or invalid auth token).
// NOTE(review): the "https:" URLs in the log/exception messages below appear truncated in
// this copy of the source — verify the full message text against the upstream file.
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
// Multi-write requires both the client policy and the account to allow it.
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Pushes the caches and service configuration into the gateway proxy. These collaborators
 * are created after the proxy itself during {@code init(...)}, hence the late wiring.
 */
private void updateGatewayProxy() {
    RxGatewayStoreModel proxy = this.gatewayProxy;
    proxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    proxy.setCollectionCache(this.collectionCache);
    proxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    proxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/**
 * Completes client construction: wires the gateway proxy, endpoint manager, collection and
 * partition-key-range caches, client telemetry, and the store model (gateway or direct).
 * Ordering matters: the gateway proxy must exist before the collection cache (which uses it),
 * and the caches must exist before {@code updateGatewayProxy()} pushes them back into the proxy.
 * Closes the client and rethrows on any failure.
 *
 * @param metadataCachesSnapshot optional warm-start snapshot of collection metadata
 * @param httpClientInterceptor optional wrapper applied to the freshly created HTTP client
 */
public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
// Warm-start the collection cache from the snapshot when one is provided.
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
// Replace the plain retry policy with one that can reset session tokens via the cache.
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
UUID.randomUUID().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
userAgentContainer.getUserAgent(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
// Gateway mode reuses the proxy as the store model; direct mode builds a TCP store model.
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
// Snapshots this client's collection cache into the given state object; init(...) can
// later consume such a snapshot to warm-start another client's metadata caches.
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
// Builds the direct-mode (TCP) plumbing: the global address resolver, the store client
// factory, and finally the server store model. Called from init() only when the connection
// mode is not GATEWAY; the order is fixed because the factory needs the resolver.
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager
);
this.createStoreModel(true);
}
// Adapts this client to the narrow DatabaseAccountManagerInternal interface consumed by
// the GlobalEndpointManager; each method simply delegates back to this client instance.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
// Factory for the gateway store model; package-private and overridable so tests can
// substitute a different proxy implementation.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
/**
 * Builds the gateway HTTP client from the connection policy. When cross-client connection
 * sharing is enabled the process-wide shared instance is reused; otherwise a dedicated
 * client is created and its configuration is recorded for diagnostics.
 */
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (!connectionSharingAcrossClientsEnabled) {
        // Dedicated client: capture its config in the diagnostics snapshot.
        diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
        return HttpClient.createFixed(httpClientConfig);
    }
    return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
}
// Creates the direct-mode store client and wraps it in a ServerStoreModel.
// NOTE(review): the subscribeRntbdStatus parameter is not read anywhere in this body —
// confirm whether it is dead or consumed elsewhere before removing it.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations
);
this.storeModel = new ServerStoreModel(storeClient);
}
// Returns the account endpoint this client was constructed with.
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
/** Returns the first (highest priority) write region endpoint, or null when none is known. */
@Override
public URI getWriteEndpoint() {
    List<URI> writeEndpoints = globalEndpointManager.getWriteEndpoints();
    return writeEndpoints.isEmpty() ? null : writeEndpoints.get(0);
}
/** Returns the first (highest priority) read region endpoint, or null when none is known. */
@Override
public URI getReadEndpoint() {
    List<URI> readEndpoints = globalEndpointManager.getReadEndpoints();
    return readEndpoints.isEmpty() ? null : readEndpoints.get(0);
}
// Returns the effective connection policy (the supplied one, or the default direct-mode
// policy chosen by the root constructor).
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
/** Whether write operations return the resource payload in the response. */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/** Returns the consistency level this client was constructed with. */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return this.consistencyLevel;
}
// Returns the telemetry component created during init(); may be null before init() runs.
@Override
public ClientTelemetry getClientTelemetry() {
return this.clientTelemetry;
}
/**
 * Creates a database. One retry-policy instance is created per logical operation and shared
 * between the retry driver and the request so retry context is recorded consistently.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Create-Database request. Serialization time is measured and attached
 * to the request's diagnostics. All synchronous failures are surfaced as error Monos.
 *
 * @throws no exception directly — argument problems become {@code Mono.error}
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }

        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);

        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);

        // Measure serialization cost so it can be surfaced in request diagnostics.
        Instant serializationStart = Instant.now();
        ByteBuffer serializedDatabase = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEnd = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStart,
                serializationEnd,
                SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, serializedDatabase, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext diagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (diagnosticsContext != null) {
            diagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Deletes a database; retry policy is shared between driver and request. */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Delete-Database request; synchronous failures become error Monos.
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }

        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String requestPath = Utils.joinPath(databaseLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, requestPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads a database; retry policy is shared between driver and request. */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Read-Database request; synchronous failures become error Monos.
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }

        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String requestPath = Utils.joinPath(databaseLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, requestPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Reads the database feed for the whole account via the generic readFeed helper.
@Override
public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a child resource type to the feed link used for queries.
 * Root-scoped feeds (databases, offers) have fixed links; every other feed is the parent
 * link joined with the child path segment.
 *
 * @throws IllegalArgumentException for resource types without a query feed
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    final String childPathSegment;
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        case DocumentCollection:
            childPathSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case Document:
            childPathSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case User:
            childPathSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case ClientEncryptionKey:
            childPathSegment = Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
            break;
        case Permission:
            childPathSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case Attachment:
            childPathSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            childPathSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case Trigger:
            childPathSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            childPathSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Conflict:
            childPathSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResourceLink, childPathSegment);
}
/** Extracts the operation context/listener from query options; null options mean no listener. */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}
/** Extracts the operation context/listener from request options; null options mean no listener. */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Common entry point for feed queries: resolves the feed link for the resource type, picks a
 * correlation activity id (caller-supplied one wins over a random one), builds the query
 * client, and runs the query under an InvalidPartitionException retry policy so stale
 * collection metadata triggers a retry rather than a failure.
 */
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
UUID correlationActivityIdOfRequestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getCorrelationActivityId(options);
// Prefer the caller-provided correlation id; otherwise generate one for this query.
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this,
getOperationContextAndListenerTuple(options));
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options));
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, correlationActivityId),
invalidPartitionExceptionRetryPolicy);
}
/**
 * Executes the query through the appropriate execution context and enriches responses:
 * query info is attached when the pipeline exposes it, and the query-plan diagnostics are
 * attached only to the first response (tracked via the AtomicBoolean, shared across the
 * whole reactive pipeline).
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache);
// Flips to false after the first emitted page; guards the one-time diagnostics attachment.
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
// Only pipelined contexts expose query info (e.g. SELECT VALUE projections).
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
return iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
});
}
// Convenience overload: wraps the raw query text in a SqlQuerySpec.
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
return queryDatabases(new SqlQuerySpec(query), options);
}
// Queries the account-level database feed via the shared createQuery pipeline.
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
/** Creates a collection; retry policy is shared between driver and request. */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Create-Collection request. Serialization time is recorded in the
 * request diagnostics, and on success the returned session token is stored in the session
 * container (keyed by both resource id and alt link) so follow-up reads observe the write.
 * Synchronous failures become error Monos.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
// Measure serialization cost so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Capture the session token for the new collection so subsequent
// session-consistent reads see this write.
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Replaces a collection; retry policy is shared between driver and request. */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Replace-Collection request. Serialization time is recorded in the
 * request diagnostics; on success (and only when a resource body is returned) the session
 * token is stored in the session container. Synchronous failures become error Monos.
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
// Measure serialization cost so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Unlike create, replace may omit the resource body; only then skip the token update.
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a document collection, retrying per the session-token-reset retry policy.
 *
 * @param collectionLink self link of the collection to delete
 * @param options request options (may be null)
 * @return Mono emitting the delete response
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the DELETE request for a document collection.
 *
 * @param collectionLink self link of the collection; must be non-empty
 * @param options request options used to build headers (may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return Mono emitting the delete response, or erroring on validation/transport failure
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Synchronous failures are surfaced as an error Mono instead of being thrown.
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Populates headers for a DELETE verb and dispatches the request through the store proxy.
 * Updates the retry context end time when this call is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(requestPopulated -> {
            // On a retry (count > 0), stamp the end time of the previous attempt for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
/**
 * Dispatches a "delete all items by partition key" request. Note this operation uses the
 * POST verb (it is a service-side bulk operation), not DELETE.
 */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            // On a retry (count > 0), stamp the end time of the previous attempt for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
/**
 * Populates headers for a GET verb and dispatches the read request through the store proxy.
 * Updates the retry context end time when this call is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            // On a retry (count > 0), stamp the end time of the previous attempt for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
/** Populates headers for a GET verb and dispatches a feed-read request (no retry bookkeeping). */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
}
/**
 * Dispatches a query (POST verb) and captures the response's session token so subsequent
 * session-consistent reads observe this query's writes.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated ->
            this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                .map(response -> {
                    // Record the session token before handing the response to the caller.
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }
            ));
}
/**
 * Reads a document collection, retrying per the session-token-reset retry policy.
 *
 * @param collectionLink self link of the collection to read
 * @param options request options (may be null)
 * @return Mono emitting the collection
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the GET request for a single document collection.
 *
 * @param collectionLink self link of the collection; must be non-empty
 * @param options request options used to build headers (may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return Mono emitting the collection, or erroring on validation/transport failure
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Synchronous failures are surfaced as an error Mono instead of being thrown.
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the feed of collections under the given database.
 *
 * @param databaseLink link of the parent database; must be non-empty
 * @param options query options applied to the feed read
 * @return Flux of collection feed pages
 * @throws IllegalArgumentException if {@code databaseLink} is empty
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String collectionsFeedPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, collectionsFeedPath);
}
/**
 * Queries collections under a database using a raw SQL string.
 *
 * @param databaseLink link of the parent database
 * @param query SQL query text
 * @param options query options
 * @return Flux of collection feed pages matching the query
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Queries collections under a database using a parameterized query spec.
 * Pure delegation to {@code createQuery}.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array literal, e.g. {@code [1,"a",{"x":2}]}.
 * {@link JsonSerializable} values use the SDK's own serialization; everything else goes
 * through the shared Jackson mapper.
 *
 * @param objectArray procedure parameters in call order
 * @return JSON array string containing each serialized parameter
 * @throws IllegalArgumentException if a parameter cannot be serialized to JSON
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    final String[] serialized = new String[objectArray.size()];
    for (int index = 0; index < serialized.length; index++) {
        Object param = objectArray.get(index);
        if (param instanceof JsonSerializable) {
            serialized[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                serialized[index] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the HTTP header map for a request from client-level defaults and per-request options.
 * Precedence is significant: client defaults are written first, then custom option headers,
 * then individual option setters — so later writes override earlier ones for the same header.
 *
 * @param options per-request options; when null only client-level defaults apply
 * @param resourceType resource being addressed (used for write-operation PREFER handling)
 * @param operationType operation being performed
 * @return mutable header map for the outgoing request
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only the client-wide content-response preference applies.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Custom headers first so the dedicated option setters below can still override them.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request content-response flag overrides the client-level default when set.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        // Ask the service not to echo the resource body back on writes.
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if(options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Pre/post trigger lists are sent as comma-separated header values.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Explicit offer throughput wins over offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties path: only consulted when no explicit offer throughput was given.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Mixing a fixed (manual) throughput offer with autoscale settings is invalid.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    if (options.getDedicatedGatewayRequestOptions() != null &&
        options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
        headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
            String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
    }
    return headers;
}
/** Returns the factory producing per-request retry policies that reset the session token on failure. */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
/**
 * Resolves the target collection from the collection cache, then stamps the partition key
 * header onto the request. Convenience overload that performs the cache lookup itself.
 *
 * @return Mono emitting the same request instance after the partition key header is set
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Document document,
    RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs
        .map(collectionValueHolder -> {
            // Delegates to the synchronous core once the collection definition is available.
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
}
/**
 * Overload that takes an already-started collection resolution. Stamps the partition key
 * header onto the request once the collection definition arrives.
 *
 * @return Mono emitting the same request instance after the partition key header is set
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Object document,
    RequestOptions options,
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionValueHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
        return request;
    });
}
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (objectDoc instanceof ObjectNode) {
internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
} else if (contentAsByteBuffer != null) {
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
Instant serializationStartTime = Instant.now();
partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Extracts the partition key value(s) from a document according to the collection's
 * partition key definition.
 *
 * HASH: single path; a missing value or an ObjectNode at the path maps to the "none"
 * partition key. MULTI_HASH (hierarchical keys): one value per defined path, in order.
 *
 * @param document document to read the key value(s) from
 * @param partitionKeyDefinition the collection's partition key definition; null returns null
 * @return the extracted partition key, or null when the definition is null or has no usable path
 * @throws IllegalArgumentException for an unrecognized partition kind
 */
public static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    InternalObjectNode document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition != null) {
        switch (partitionKeyDefinition.getKind()) {
            case HASH:
                String path = partitionKeyDefinition.getPaths().iterator().next();
                List<String> parts = PathParser.getPathParts(path);
                if (parts.size() >= 1) {
                    Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts);
                    if (value == null || value.getClass() == ObjectNode.class) {
                        // Absent value (or a raw object node) is treated as the "none" partition key.
                        value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
                    }
                    if (value instanceof PartitionKeyInternal) {
                        return (PartitionKeyInternal) value;
                    } else {
                        return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
                    }
                }
                break;
            case MULTI_HASH:
                // Hierarchical partition key: collect one value per path, preserving path order.
                Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()];
                for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){
                    String partitionPath = partitionKeyDefinition.getPaths().get(pathIter);
                    List<String> partitionPathParts = PathParser.getPathParts(partitionPath);
                    partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts);
                }
                return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false);
            default:
                throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind());
        }
    }
    return null;
}
/**
 * Builds a document write request (Create/Upsert): serializes the document, records
 * serialization diagnostics, and resolves + stamps the partition key header.
 *
 * @param requestRetryPolicy retry policy notified before the request is sent (may be null)
 * @param documentCollectionLink link of the target collection; must be non-empty
 * @param document document payload; must be non-null
 * @param options request options used to build headers (may be null)
 * @param disableAutomaticIdGeneration currently unused here; id generation is handled by callers —
 *        NOTE(review): confirm this parameter is intentionally ignored in this path
 * @param operationType Create or Upsert
 * @return Mono emitting the fully prepared request
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time the JSON serialization so it can be surfaced in request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Partition key must be resolved against the collection definition before dispatch.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds a transactional batch request: wraps the pre-serialized batch body, records
 * serialization diagnostics, resolves the collection, and stamps batch-specific headers.
 *
 * @param requestRetryPolicy retry policy notified before the request is sent (may be null)
 * @param documentCollectionLink link of the target collection; must be non-empty
 * @param serverBatchRequest batch whose request body has already been serialized
 * @param options request options used to build headers (may be null)
 * @param disableAutomaticIdGeneration currently unused in this path —
 *        NOTE(review): confirm this parameter is intentionally ignored
 * @return Mono emitting the fully prepared batch request
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // The body was serialized by the batch layer; only the UTF-8 wrapping is timed here.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Batch headers (partition key or PK range id) require the collection definition.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Stamps batch routing and behavior headers onto the request.
 * Single-partition batches get a partition key header; partition-key-range batches get a
 * range identity instead. Also sets the is-batch, atomicity, continue-on-error headers and
 * the operation count.
 *
 * @return the same request instance, mutated
 * @throws UnsupportedOperationException for an unknown ServerBatchRequest subtype
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
    ServerBatchRequest serverBatchRequest,
    DocumentCollection collection) {
    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            // PartitionKey.NONE maps to the collection-specific "none" key.
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        // Range-scoped batch: route by partition key range id rather than a key value.
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/*
 * NOTE(review): this Javadoc describes populateHeadersAsync(RxDocumentServiceRequest, RequestVerb),
 * which is defined elsewhere in this class — it does not document the method immediately below.
 * Caller needs to consume the returned Mono by subscribing to it in order for the request
 * headers to be populated; the Mono emits the same request instance with headers filled in.
 */
/**
 * Decides whether this request needs feed-range filtering: only Document/Conflict
 * feed-reads and queries that carry an explicit feed range qualify.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    boolean feedRangeCapableResource =
        resourceType == ResourceType.Document || resourceType == ResourceType.Conflict;
    if (!feedRangeCapableResource) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean isFeedOrQueryOperation =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;
    return isFeedOrQueryOperation && request.getFeedRange() != null;
}
/**
 * Attaches an AAD authorization header to the request when AAD token auth is in use.
 * For any other token type the request is passed through unchanged (key/resource-token
 * authorization is applied elsewhere).
 *
 * @param request request to decorate; must be non-null
 * @return Mono emitting the same request, possibly with the AUTHORIZATION header set
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
        return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
            .map(authorization -> {
                request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                return request;
            });
    } else {
        return Mono.just(request);
    }
}
/**
 * Attaches an AAD authorization header to the given HTTP headers when AAD token auth
 * is in use; otherwise returns the headers untouched.
 *
 * @param httpHeaders headers to decorate; must be non-null
 * @return Mono emitting the same headers object, possibly with AUTHORIZATION set
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        // Non-AAD auth is applied elsewhere; pass headers through unchanged.
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/** Returns the authorization token type this client was configured with. */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
/**
 * Produces the authorization token for a request, choosing the credential source in
 * priority order: custom token resolver, master-key/AAD credential signature, a single
 * resource token, and finally the per-resource token map.
 *
 * @param resourceName address/name of the target resource
 * @param resourceType type of the target resource
 * @param requestVerb HTTP verb of the request
 * @param headers request headers (used for signature computation)
 * @param tokenType requested token type (not consulted in the branches below —
 *        NOTE(review): confirm this is intentional)
 * @param properties extra properties passed through to a custom resolver
 * @return the authorization token/signature to place on the request
 */
@Override
public String getUserAuthorizationToken(String resourceName,
    ResourceType resourceType,
    RequestVerb requestVerb,
    Map<String, String> headers,
    AuthorizationTokenType tokenType,
    Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        // User-supplied resolver takes precedence over all built-in mechanisms.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token was supplied directly; use it as-is.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Account-level reads fall back to the first token from the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Maps an internal ResourceType to the public CosmosResourceType, defaulting to
 * SYSTEM when no mapping exists.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
/** Records the session token from a response into the session container for this request's scope. */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Populates headers for a POST verb and dispatches a create request through the store proxy.
 * Updates the retry context end time when this call is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
    DocumentClientRetryPolicy documentClientRetryPolicy,
    OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            // On a retry (count > 0), stamp the end time of the previous attempt for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
/**
 * Dispatches an upsert: a POST with the IS_UPSERT header set. Unlike {@code create},
 * this path also captures the response's session token.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
    DocumentClientRetryPolicy documentClientRetryPolicy,
    OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            Map<String, String> headers = requestPopulated.getHeaders();
            // populateHeadersAsync guarantees headers are present at this point.
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            // On a retry (count > 0), stamp the end time of the previous attempt for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
                .map(response -> {
                    // Record the session token before handing the response to the caller.
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }
            );
        });
}
/**
 * Populates headers for a PUT verb and dispatches a replace request through the store proxy.
 * Updates the retry context end time when this call is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(requestPopulated -> {
            // On a retry (count > 0), stamp the end time of the previous attempt for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
/**
 * Populates headers for a PATCH verb and dispatches a patch request through the store proxy.
 * Updates the retry context end time when this call is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(requestPopulated -> {
            // On a retry (count > 0), stamp the end time of the previous attempt for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
/**
 * Creates a document. When no partition key is supplied in the options, the retry policy
 * is wrapped so that partition-key-definition mismatches (stale collection cache) are retried.
 *
 * @param collectionLink link of the target collection
 * @param document document payload
 * @param options request options (may be null)
 * @param disableAutomaticIdGeneration whether to suppress automatic id generation
 * @return Mono emitting the created document
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy basePolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    final DocumentClientRetryPolicy effectivePolicy =
        (options == null || options.getPartitionKey() == null)
            ? new PartitionKeyMismatchRetryPolicy(collectionCache, basePolicy, collectionLink, options)
            : basePolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the create-document request and dispatches it.
 *
 * @return Mono emitting the created document, or erroring on validation/transport failure
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        // Synchronous failures are surfaced as an error Mono instead of being thrown.
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a document. When no partition key is supplied in the options, the retry policy
 * is wrapped so that partition-key-definition mismatches (stale collection cache) are retried.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
    }
    // Effectively-final copy required for capture in the lambda below.
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance);
}
/**
 * Builds the upsert-document request and dispatches it.
 *
 * @return Mono emitting the upserted document, or erroring on validation/transport failure
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Upsert);
        Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        // Synchronous failures are surfaced as an error Mono instead of being thrown.
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document addressed by its link. When no partition key is supplied in the
 * options, the retry policy is wrapped to retry on partition-key-definition mismatches.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
    RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        // Derive the owning collection from the document link for the mismatch retry policy.
        String collectionLink = Utils.getCollectionName(documentLink);
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
    }
    // Effectively-final copy required for capture in the lambda below.
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
/**
 * Validates the arguments, converts the raw {@code document} object into a
 * {@link Document}, and delegates to the typed replace overload.
 *
 * @throws IllegalArgumentException (as an error Mono) when documentLink is
 *         empty or document is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the exception as the final argument so the stack trace is logged,
        // consistent with the other document operations in this class.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    // Wrap the base retry policy with a partition-key mismatch retry when no
    // explicit partition key was supplied by the caller.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, document.getSelfLink(), options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, effectiveRetryPolicy),
        effectiveRetryPolicy);
}
/**
 * Replaces the given document in place, using its self-link as the target.
 *
 * @throws IllegalArgumentException (as an error Mono) when document is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fixed log text: this path replaces a document, not a database. Also
        // pass the exception itself so the stack trace is preserved in the log.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Core replace path: serializes the document (timed for diagnostics), builds
// the Replace request, stamps partition-key information, and executes it.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
// Defensive re-check: the public overloads validate too, but this private
// overload can also be reached directly.
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Time payload serialization so it can be surfaced in the diagnostics below.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection so partition-key headers can be added to the request.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
// NOTE(review): the lambda ignores 'req' and reuses the outer 'request';
// presumably addPartitionKeyInformation mutates and returns the same instance
// — confirm.
return requestObs.flatMap(req -> replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options) {

    // Unlike replace, no partition-key mismatch wrapper is layered on here;
    // the plain session-token-reset policy is used directly.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, retryPolicy),
        retryPolicy);
}
// Applies the given patch operations to the document at 'documentLink' and
// returns the patched document, recording serialization time in diagnostics.
private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Serialize the patch payload, timing it for the diagnostics context below.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection so partition-key headers can be added to the request.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// No document body is passed (null, null) — the partition key presumably
// comes from 'options'; verify against callers.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
// NOTE(review): the lambda ignores 'req' and reuses the outer 'request';
// presumably the same instance — confirm.
return requestObs.flatMap(req -> patch(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    // Delegates to the internal path with no document body (null InternalObjectNode).
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, null, options, retryPolicy),
        retryPolicy);
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    // Overload that forwards the document body so the internal path can use it
    // when stamping partition-key information.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, internalObjectNode, options, retryPolicy),
        retryPolicy);
}
// Deletes the document at 'documentLink'. 'internalObjectNode' (may be null)
// is forwarded to addPartitionKeyInformation along with 'options'.
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
// Resolve the collection, then stamp partition-key information on the request.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs);
return requestObs.flatMap(req -> this
.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
// Deletes all documents in one logical partition of the collection.
// NOTE(review): the 'partitionKey' argument is not forwarded to the internal
// method — presumably the key is carried inside 'options'; confirm callers
// always populate options consistently with this parameter.
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
requestRetryPolicy);
}
// Issues a Delete request against the PartitionKey resource type, removing all
// items that share the partition key derived from 'options'.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
// Note the resource type: PartitionKey, not Document.
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> this
.deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    // Thin wrapper: acquire a retry policy and run the internal read through it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy), retryPolicy);
}
// Reads a single document by link, resolving the collection first so
// partition-key information can be stamped on the request.
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Reading a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Document, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
// NOTE(review): the lambda ignores 'req' and reuses the outer 'request';
// presumably the same instance — confirm addPartitionKeyInformation semantics.
return requestObs.flatMap(req -> {
return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, CosmosQueryRequestOptions options, Class<T> classOfT) {

    // A read-all is implemented as an unfiltered query over the collection.
    if (StringUtils.isNotEmpty(collectionLink)) {
        return queryDocuments(collectionLink, "SELECT * FROM r", options, classOfT);
    }
    throw new IllegalArgumentException("collectionLink");
}
// Point-reads many items in one call: groups the requested (id, partition key)
// pairs by their owning partition-key range, issues one query per range, then
// merges all result pages into a single synthetic FeedResponse.
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
CosmosQueryRequestOptions options,
Class<T> klass) {
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
// Resolve the collection to obtain its partition-key definition and resource id.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
// Look up the routing map so each identity can be mapped to its PK range.
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap =
new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
// Bucket each requested identity under the range that owns its
// effective partition key.
itemIdentityList
.forEach(itemIdentity -> {
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// Build one SQL query per range covering its bucket of identities.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
collection.getPartitionKey());
return createReadManyQuery(
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
options,
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap))
.collectList()
.map(feedList -> {
// Merge pages: deserialize into the caller's type and sum request charges.
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
// NOTE(review): aggregatedQueryMetrics is collected but never attached
// to the returned response — confirm whether that is intentional.
FeedResponse<T> frp = BridgeInternal
.createFeedResponse(finalList, headers);
return frp;
});
});
}
);
}
/**
 * Builds one SQL query spec per partition-key range, covering the identities
 * bucketed under that range. Uses the compact IN-clause form when the
 * partition key path is exactly "id", otherwise the (id AND pk) pair form.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    final String pkSelector = createPkSelector(partitionKeyDefinition);
    final boolean pkIsId = pkSelector.equals("[\"id\"]");
    final Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, identities) -> {
        SqlQuerySpec spec = pkIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector)
            : createReadManyQuerySpec(identities, pkSelector);
        rangeQueryMap.put(range, spec);
    });
    return rangeQueryMap;
}
/**
 * Builds "SELECT * FROM c WHERE c.id IN ( @param0, ... )" for the case where
 * the partition key path is the id itself. Identities whose partition-key
 * value does not equal their id are skipped defensively.
 *
 * Fix: the previous implementation appended the separator based on the loop
 * index, so when a skipped element was the last one the query ended with a
 * trailing comma ("..., )") — invalid SQL. Separators are now emitted only
 * between elements that were actually added.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder queryStringBuilder = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");
    int addedCount = 0;
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);
        String idValue = itemIdentity.getId();
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // Only valid when pk == id; otherwise this compact form cannot be used.
        if (!Objects.equals(idValue, pkValue)) {
            continue;
        }
        if (addedCount > 0) {
            queryStringBuilder.append(", ");
        }
        // Parameter names keep the original index to stay stable across skips.
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        queryStringBuilder.append(idParamName);
        addedCount++;
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds "SELECT * FROM c WHERE ( (c.id = @p1 AND c[pk] = @p0) OR ... )",
 * one OR-ed clause per requested identity. Parameters are numbered in pairs:
 * even index = partition-key value, odd index = id value.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    final int count = itemIdentities.size();
    for (int index = 0; index < count; index++) {
        CosmosItemIdentity identity = itemIdentities.get(index);
        Object partitionKeyValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        String pkParamName = "@param" + (2 * index);
        String idParamName = "@param" + (2 * index + 1);
        parameters.add(new SqlParameter(pkParamName, partitionKeyValue));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        query.append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
        if (index < count - 1) {
            query.append(" OR ");
        }
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
// Builds the bracketed selector used to address the partition-key property in
// SQL, e.g. path "/pk" -> "[\"pk\"]"; nested paths concatenate their segments.
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getPaths()
.stream()
// Drop the leading '/' of each path segment.
.map(pathPart -> StringUtils.substring(pathPart, 1))
// NOTE(review): replaces '"' with a single backslash — looks like an escaping
// step but does not produce '\"'; confirm this matches the intended
// server-side escaping rules for quoted property names.
.map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
.map(part -> "[\"" + part + "\"]")
.collect(Collectors.joining());
}
/**
 * Executes the per-range readMany queries: builds one execution context per
 * partition-key range (via the factory) and flattens their result pages into
 * a single Flux of feed responses.
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

    final UUID correlationActivityId = Utils.randomUUID();
    final IDocumentQueryClient queryClient =
        documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> contexts =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            correlationActivityId,
            klass,
            resourceTypeEnum);
    return contexts.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, CosmosQueryRequestOptions options, Class<T> classOfT) {
    // Wrap the raw query text in a SqlQuerySpec and reuse the spec-based overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, options, classOfT);
}
// Adapts this client to the IDocumentQueryClient interface consumed by the
// query execution machinery, wiring in the optional operation listener so
// request/response/exception events are reported per query.
// NOTE(review): the 'rxDocumentClientImpl' parameter is unused — the anonymous
// class captures RxDocumentClientImpl.this directly; confirm the parameter can
// be removed or is kept for signature stability.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
// Without a listener, just run the query; with one, also stamp the
// correlated activity id header and report lifecycle events.
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
// Not supported by this adapter; callers are not expected to use readFeed here.
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
return null;
}
};
}
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options,
    Class<T> classOfT) {

    // Log the query text/parameters before handing off to the generic pipeline.
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, options, classOfT, ResourceType.Document);
}
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {

    checkNotNull(collection, "Argument 'collection' must not be null.");
    // Delegate paging/continuation mechanics of the change feed to ChangeFeedQueryImpl.
    return new ChangeFeedQueryImpl<T>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions)
        .executeAsync();
}
// Reads all documents of a single logical partition: resolves the collection,
// maps the partition key to its owning PK range via the routing map, then runs
// a partition-scoped scan query against just that range.
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
CosmosQueryRequestOptions options,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
// Resolve the collection for its partition-key definition and resource id.
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
final CosmosQueryRequestOptions effectiveOptions =
ModelBridgeInternal.createQueryRequestOptions(options);
// Retry the whole lookup+query if the partition was split/invalidated mid-flight.
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
// Map the caller's partition key to the single range that owns it.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
// Pin the query to that range and execute the scan.
return createQueryInternal(
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId);
});
},
invalidPartitionExceptionRetryPolicy);
});
}
// Exposes the client-wide cache of partitioned query execution plans.
// NOTE(review): this returns the internal mutable map instance directly —
// confirm callers are expected to be able to mutate it.
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
return queryPlanCache;
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    CosmosQueryRequestOptions options) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Read the collection's partition-key-range feed directly.
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Validates the inputs and builds a service request targeting the stored
 * procedures feed of the given collection for the requested operation.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Validates the inputs and builds a service request targeting the
 * user-defined-functions feed of the given collection.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    // Thin wrapper: acquire a retry policy and run the internal create through it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy), retryPolicy);
}
/**
 * Builds and executes a Create request for a stored procedure; synchronous
 * failures during request construction are surfaced as an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    // Thin wrapper: acquire a retry policy and run the internal upsert through it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy), retryPolicy);
}
/**
 * Builds and executes an Upsert request for a stored procedure; synchronous
 * failures during request construction are surfaced as an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    // Thin wrapper: acquire a retry policy and run the internal replace through it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy), retryPolicy);
}
/**
 * Replaces an existing stored procedure, addressed by its self-link.
 * Synchronous failures (null argument, invalid resource) are surfaced as an
 * error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        final String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    // Thin wrapper: acquire a retry policy and run the internal delete through it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy), retryPolicy);
}
/**
 * Deletes the stored procedure addressed by {@code storedProcedureLink}.
 * Synchronous failures are converted into {@code Mono.error}.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String resourcePath = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            // Let the retry policy capture per-request state before dispatch.
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    // Defer the read so each subscription/retry goes through the policy.
    final DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, policy),
        policy);
}
/**
 * Reads a single stored procedure by link; validation and build failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String resourcePath = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    CosmosQueryRequestOptions options) {
    // A collection link is mandatory to address the stored-procedures feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text and delegate to the spec-based overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Delegates to the shared query pipeline for stored procedures.
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    List<Object> procedureParams) {
    // No per-request options supplied: delegate with null options.
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    // Execute under a fresh retry policy; work is deferred to subscription time.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    // Batch execution goes through the same deferred, retry-wrapped path as single operations.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
// Executes a stored procedure (server-side JavaScript) at the given link.
// Serializes the parameter list into the request body, enriches the request with
// partition-key information, then sends it; the session token from the response is
// captured before it is converted to a StoredProcedureResponse.
// Synchronous failures are surfaced as Mono.error.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
// Sproc execution returns JSON; force the Accept header accordingly.
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
// Empty body when there are no parameters; otherwise a serialized JSON array.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
// NOTE(review): the lambda below deliberately(?) uses the outer 'request' rather than
// the emitted 'req' — safe only if addPartitionKeyInformation mutates and re-emits the
// same instance; confirm before refactoring.
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Executes a transactional batch against the target collection.
// Request construction is itself asynchronous (getBatchDocumentRequest), so the send is
// chained via flatMap; the raw service response is parsed into a CosmosBatchResponse.
// Synchronous failures are surfaced as Mono.error.
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
DocumentClientRetryPolicy requestRetryPolicy,
boolean disableAutomaticIdGeneration) {
try {
logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
return responseObservable
// Third argument 'true' — presumably enables strict response/request matching in the parser; TODO confirm.
.map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
} catch (Exception ex) {
logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
return Mono.error(ex);
}
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    // Defer creation through a fresh retry policy instance.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues a Create request for a trigger; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        // Shared request builder validates arguments and assembles headers/path.
        final RxDocumentServiceRequest serviceRequest = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    // Same deferred pattern as createTrigger, but with Upsert semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues an Upsert request for a trigger; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        final RxDocumentServiceRequest serviceRequest = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and assembles a Trigger service request for the given operation type.
 *
 * @throws IllegalArgumentException when the collection link is empty or the trigger is null
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    final String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    // Return directly — no need for an intermediate local.
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, resourcePath,
        trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // Deferred, retry-wrapped replace of a trigger by self-link.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing trigger addressed by its self-link; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        final String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    // Deferred, retry-wrapped delete of a trigger by link.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the trigger addressed by {@code triggerLink}; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        final String resourcePath = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    // Deferred, retry-wrapped point-read of a trigger.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a single trigger by link; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        final String resourcePath = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
    // A collection link is mandatory to address the triggers feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedPath);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    // Convenience overload: wrap raw query text into a SqlQuerySpec.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    // Delegates to the shared query pipeline for triggers.
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    // Deferred, retry-wrapped creation of a UDF.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues a Create request for a UDF; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        // Shared builder validates arguments and assembles headers/path.
        final RxDocumentServiceRequest serviceRequest = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    // Deferred, retry-wrapped upsert of a UDF.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues an Upsert request for a UDF; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        final RxDocumentServiceRequest serviceRequest = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    // Deferred, retry-wrapped replace of a UDF by self-link.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing user-defined function addressed by its self-link.
 * Argument validation happens inside the try block so failures surface as {@code Mono.error}
 * rather than synchronous throws, matching the sibling {@code *Internal} methods.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        // Qualified call for consistency with every other *Internal method in this class,
        // which invoke RxDocumentClientImpl.validateResource(...) explicitly.
        RxDocumentClientImpl.validateResource(udf);
        String path = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // Let the retry policy capture per-request state before dispatch.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    // Deferred, retry-wrapped delete of a UDF by link.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the UDF addressed by {@code udfLink}; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        final String resourcePath = Utils.joinPath(udfLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    // Deferred, retry-wrapped point-read of a UDF.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a single UDF by link; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        final String resourcePath = Utils.joinPath(udfLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    CosmosQueryRequestOptions options) {
    // A collection link is mandatory to address the UDF feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
    String query, CosmosQueryRequestOptions options) {
    // Convenience overload: wrap raw query text into a SqlQuerySpec.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Delegates to the shared query pipeline for UDFs.
    return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Deferred, retry-wrapped point-read of a conflict resource.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Reads a single Conflict resource by its link.
// The request is asynchronously enriched with partition-key information before dispatch.
// NOTE(review): the flatMap lambda uses the outer 'request' rather than the emitted 'req' —
// correct only if addPartitionKeyInformation mutates and re-emits the same instance; confirm
// before refactoring.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
// Retry-policy hook runs only once the enriched request is ready.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
    // A collection link is mandatory to address the conflicts feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Conflict, Conflict.class, feedPath);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    // Convenience overload: wrap raw query text into a SqlQuerySpec.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    // Delegates to the shared query pipeline for conflicts.
    return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // Deferred, retry-wrapped delete of a conflict resource.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a Conflict resource by its link.
// Like readConflictInternal, the request is asynchronously enriched with partition-key
// information first. NOTE(review): the flatMap lambda uses the outer 'request' rather than
// the emitted 'req' — correct only if addPartitionKeyInformation mutates and re-emits the
// same instance; confirm before refactoring.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
// Retry-policy hook runs only once the enriched request is ready.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // Deferred, retry-wrapped creation of a User resource.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Creates a User under the given database; argument validation is delegated to getUserRequest.
// NOTE(review): unlike the sibling *Internal methods, the retry policy's onBeforeSendRequest
// is never invoked here — confirm whether that omission is intentional.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Deferred, retry-wrapped upsert of a User resource.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues an Upsert request for a User; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        // Shared builder validates arguments and assembles headers/path.
        final RxDocumentServiceRequest serviceRequest = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and assembles a User service request for the given operation type.
 *
 * @throws IllegalArgumentException when the database link is empty or the user is null
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    final String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    // Return directly — no need for an intermediate local.
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.User, resourcePath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Deferred, retry-wrapped replace of a User by self-link.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing User addressed by its self-link; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        final String resourcePath = Utils.joinPath(user.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, resourcePath, user, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes the User addressed by {@code userLink} under a fresh retry policy.
 */
// @Override added for consistency — every sibling User CRUD method (createUser, upsertUser,
// replaceUser, readUser) carries it. NOTE(review): confirm deleteUser is declared on the
// implemented client interface.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Deletes the User addressed by {@code userLink}; synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        final String resourcePath = Utils.joinPath(userLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // Deferred, retry-wrapped point-read of a User.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return readFeed(options, ResourceType.User, User.class,
Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
return queryUsers(databaseLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
    /**
     * Reads the client encryption key identified by {@code clientEncryptionKeyLink},
     * wiring the request through the session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                               RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    /**
     * Builds and dispatches the READ request for a client encryption key; synchronous
     * failures (including argument validation) are surfaced as an error Mono.
     */
    private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
                throw new IllegalArgumentException("clientEncryptionKeyLink");
            }
            logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
            String path = Utils.joinPath(clientEncryptionKeyLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                // retry policy observes the request before dispatch, as in sibling methods
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Creates a client encryption key in the given database, wiring the request through
     * the session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                                 ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
    }

    /**
     * Builds and dispatches the CREATE request for a client encryption key; synchronous
     * failures are surfaced as an error Mono.
     */
    private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
            RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Validates the arguments and assembles the service request for a client encryption
     * key operation under {@code databaseLink}.
     *
     * @throws IllegalArgumentException when {@code databaseLink} is empty or
     *     {@code clientEncryptionKey} is null
     */
    private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                                   OperationType operationType) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
        return request;
    }
    /**
     * Replaces a client encryption key addressed by its name-based link, wiring the
     * request through the session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                                  String nameBasedLink,
                                                                                  RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey,
            nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    /**
     * Builds and dispatches the REPLACE request for a client encryption key; synchronous
     * failures (including argument validation) are surfaced as an error Mono.
     */
    private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (clientEncryptionKey == null) {
                throw new IllegalArgumentException("clientEncryptionKey");
            }
            logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
            RxDocumentClientImpl.validateResource(clientEncryptionKey);
            String path = Utils.joinPath(nameBasedLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
                OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
                options);
            if (retryPolicyInstance != null) {
                // retry policy observes the request before dispatch, as in sibling methods
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query,
CosmosQueryRequestOptions options) {
return queryClientEncryptionKeys(databaseLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy());
}
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
    /**
     * Upserts a permission under the given user, wiring the request through the
     * session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                               RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
    }

    /**
     * Builds and dispatches the UPSERT request for a permission; synchronous failures
     * are surfaced as an error Mono.
     */
    private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
            RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
                OperationType.Upsert);
            if (retryPolicyInstance != null) {
                // retry policy observes the request before dispatch, as in sibling methods
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Validates the arguments and assembles the service request for a permission
     * operation under {@code userLink}.
     *
     * @throws IllegalArgumentException when {@code userLink} is empty or
     *     {@code permission} is null
     */
    private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                          RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        RxDocumentClientImpl.validateResource(permission);
        String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            operationType, ResourceType.Permission, path, permission, requestHeaders, options);
        return request;
    }
    /**
     * Replaces a permission addressed by its self link, wiring the request through the
     * session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
    }

    /**
     * Builds and dispatches the REPLACE request for a permission; synchronous failures
     * (including argument validation) are surfaced as an error Mono.
     */
    private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (permission == null) {
                throw new IllegalArgumentException("permission");
            }
            logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
            RxDocumentClientImpl.validateResource(permission);
            // the permission's own self link addresses the resource to replace
            String path = Utils.joinPath(permission.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
            if (retryPolicyInstance != null) {
                // retry policy observes the request before dispatch, as in sibling methods
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Deletes the permission identified by {@code permissionLink}, wiring the request
     * through the session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    /**
     * Builds and dispatches the DELETE request for a permission; synchronous failures
     * (including argument validation) are surfaced as an error Mono.
     */
    private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                        DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(permissionLink)) {
                throw new IllegalArgumentException("permissionLink");
            }
            logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
            String path = Utils.joinPath(permissionLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                // retry policy observes the request before dispatch, as in sibling methods
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Reads the permission identified by {@code permissionLink}, wiring the request
     * through the session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    /**
     * Builds and dispatches the READ request for a permission; synchronous failures
     * (including argument validation) are surfaced as an error Mono.
     */
    private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
        try {
            if (StringUtils.isEmpty(permissionLink)) {
                throw new IllegalArgumentException("permissionLink");
            }
            logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
            String path = Utils.joinPath(permissionLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                // retry policy observes the request before dispatch, as in sibling methods
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
return readFeed(options, ResourceType.Permission, Permission.class,
Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
CosmosQueryRequestOptions options) {
return queryPermissions(userLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
    /**
     * Replaces an offer addressed by its self link, wiring the request through the
     * session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
    }

    /**
     * Builds and dispatches the REPLACE request for an offer; synchronous failures
     * (including argument validation) are surfaced as an error Mono. Note offers take
     * no request headers or options.
     */
    private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            if (offer == null) {
                throw new IllegalArgumentException("offer");
            }
            logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
            RxDocumentClientImpl.validateResource(offer);
            String path = Utils.joinPath(offer.getSelfLink(), null);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
                ResourceType.Offer, path, offer, null, null);
            return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Reads the offer identified by {@code offerLink}, wiring the request through the
     * session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
    }

    /**
     * Builds and dispatches the READ request for an offer; synchronous failures
     * (including argument validation) are surfaced as an error Mono.
     */
    private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(offerLink)) {
                throw new IllegalArgumentException("offerLink");
            }
            logger.debug("Reading an Offer. offerLink [{}]", offerLink);
            String path = Utils.joinPath(offerLink, null);
            // cast disambiguates the create(...) overload; offers use no headers/options
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
        } catch (Exception e) {
            logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Reads the account-level offer feed as pages.
     */
    @Override
    public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
        return readFeed(options, ResourceType.Offer, Offer.class,
            Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
    }
    /**
     * Generic paginated ReadFeed implementation shared by readUsers/readPermissions/
     * readOffers/etc.
     *
     * @param options      paging options; a default instance is substituted when null
     * @param resourceType the resource type being enumerated
     * @param klass        the item type to deserialize each page into
     * @param resourceLink the feed link to enumerate
     * @return a Flux emitting one {@code FeedResponse} per page
     */
    private <T> Flux<FeedResponse<T>> readFeed(
        CosmosQueryRequestOptions options,
        ResourceType resourceType,
        Class<T> klass,
        String resourceLink) {
        if (options == null) {
            options = new CosmosQueryRequestOptions();
        }
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        // -1 means "no explicit page size" for the paginator
        int maxPageSize = maxItemCount != null ? maxItemCount : -1;
        // effectively-final copy so lambdas below can capture the options
        final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
        DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        // builds one service request per page, threading the continuation token through
        BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
            Map<String, String> requestHeaders = new HashMap<>();
            if (continuationToken != null) {
                requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
            }
            requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
            retryPolicy.onBeforeSendRequest(request);
            return request;
        };
        // executes a page request and maps the raw response to a typed FeedResponse
        Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
            .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(
                response,
                ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .getItemFactoryMethod(finalCosmosQueryRequestOptions, klass),
                klass)),
                retryPolicy);
        return Paginator.getPaginatedQueryResultAsObservable(
            options, createRequestFunc, executeFunc, maxPageSize);
    }
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
return queryOffers(new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
    /**
     * Fetches the database account metadata from the service, wiring the request through
     * the session-token-reset retry policy.
     */
    @Override
    public Mono<DatabaseAccount> getDatabaseAccount() {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
            documentClientRetryPolicy);
    }

    /**
     * Returns the most recently cached database account from the global endpoint manager
     * without issuing a request.
     */
    @Override
    public DatabaseAccount getLatestDatabaseAccount() {
        return this.globalEndpointManager.getLatestDatabaseAccount();
    }

    /**
     * Builds and dispatches the account READ request (empty resource address);
     * synchronous failures are surfaced as an error Mono.
     */
    private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Getting Database Account");
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read,
                ResourceType.DatabaseAccount, "",
                (HashMap<String, String>) null,
                null);
            return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
        } catch (Exception e) {
            logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Returns the session container used for session-consistency bookkeeping.
     */
    public Object getSession() {
        return this.sessionContainer;
    }

    /**
     * Replaces the session container; the argument must be a {@code SessionContainer}
     * (the unchecked cast will throw {@code ClassCastException} otherwise).
     */
    public void setSession(Object sessionContainer) {
        this.sessionContainer = (SessionContainer) sessionContainer;
    }

    /**
     * Returns the collection metadata cache.
     */
    @Override
    public RxClientCollectionCache getCollectionCache() {
        return this.collectionCache;
    }

    /**
     * Returns the partition key range cache.
     */
    @Override
    public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
        return partitionKeyRangeCache;
    }
    /**
     * Reads the database account metadata from a specific endpoint (bypassing endpoint
     * selection) via the gateway proxy. As a side effect, refreshes
     * {@code useMultipleWriteLocations} from the returned account and the connection
     * policy. Deferred so the request is built per subscription.
     */
    public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
        return Flux.defer(() -> {
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
            return this.populateHeadersAsync(request, RequestVerb.GET)
                .flatMap(requestPopulated -> {
                    // pin the request to the requested endpoint rather than the resolved one
                    requestPopulated.setEndpointOverride(endpoint);
                    return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                        String message = String.format("Failed to retrieve database account information. %s",
                            e.getCause() != null
                                ? e.getCause().toString()
                                : e.toString());
                        logger.warn(message);
                    }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                        .doOnNext(databaseAccount ->
                            this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                                && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
                });
        });
    }
    /**
     * Certain requests must be routed through gateway even when the client connectivity mode is direct.
     *
     * @param request the service request being dispatched
     * @return the {@link RxStoreModel} (gateway proxy or direct store model) that should process the request
     */
    private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
        // explicit per-request override wins
        if (request.UseGatewayMode) {
            return this.gatewayProxy;
        }
        ResourceType resourceType = request.getResourceType();
        OperationType operationType = request.getOperationType();
        // resource types the direct store model cannot serve at all
        if (resourceType == ResourceType.Offer ||
            resourceType == ResourceType.ClientEncryptionKey ||
            resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
            resourceType == ResourceType.PartitionKeyRange ||
            resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            return this.gatewayProxy;
        }
        if (operationType == OperationType.Create
            || operationType == OperationType.Upsert) {
            // metadata-resource writes go through gateway; data writes go direct
            if (resourceType == ResourceType.Database ||
                resourceType == ResourceType.User ||
                resourceType == ResourceType.DocumentCollection ||
                resourceType == ResourceType.Permission) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Delete) {
            if (resourceType == ResourceType.Database ||
                resourceType == ResourceType.User ||
                resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Replace) {
            if (resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Read) {
            if (resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else {
            // queries/feed reads over collection children without a partition key or
            // range identity must be fanned out by the gateway
            if ((operationType == OperationType.Query ||
                operationType == OperationType.SqlQuery ||
                operationType == OperationType.ReadFeed) &&
                Utils.isCollectionChild(request.getResourceType())) {
                if (request.getPartitionKeyRangeIdentity() == null &&
                    request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                    return this.gatewayProxy;
                }
            }
            return this.storeModel;
        }
    }
    /**
     * Shuts the client down exactly once: releases the endpoint manager, store client
     * factory, HTTP client, CPU monitor registration, and (if enabled) the throughput
     * control store. Subsequent calls only log a warning.
     */
    @Override
    public void close() {
        logger.info("Attempting to close client {}", this.clientId);
        // getAndSet makes the shutdown idempotent under concurrent calls
        if (!closed.getAndSet(true)) {
            activeClientsCnt.decrementAndGet();
            logger.info("Shutting down ...");
            logger.info("Closing Global Endpoint Manager ...");
            LifeCycleUtils.closeQuietly(this.globalEndpointManager);
            logger.info("Closing StoreClientFactory ...");
            LifeCycleUtils.closeQuietly(this.storeClientFactory);
            logger.info("Shutting down reactorHttpClient ...");
            LifeCycleUtils.closeQuietly(this.reactorHttpClient);
            logger.info("Shutting down CpuMonitor ...");
            CpuMemoryMonitor.unregister(this);
            if (this.throughputControlEnabled.get()) {
                logger.info("Closing ThroughputControlStore ...");
                this.throughputControlStore.close();
            }
            logger.info("Shutting down completed.");
        } else {
            logger.warn("Already shutdown!");
        }
    }
    /**
     * Returns the deserializer used for item payloads.
     */
    @Override
    public ItemDeserializer getItemDeserializer() {
        return this.itemDeserializer;
    }

    /**
     * Registers a throughput control group; lazily creates the shared
     * {@code ThroughputControlStore} on first use (CAS guard ensures one-time init)
     * and attaches it to the store model.
     */
    @Override
    public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) {
        checkNotNull(group, "Throughput control group can not be null");
        if (this.throughputControlEnabled.compareAndSet(false, true)) {
            this.throughputControlStore =
                new ThroughputControlStore(
                    this.collectionCache,
                    this.connectionPolicy.getConnectionMode(),
                    this.partitionKeyRangeCache);
            this.storeModel.enableThroughputControl(throughputControlStore);
        }
        this.throughputControlStore.enableThroughputControlGroup(group);
    }

    /**
     * Warms up connections and metadata caches for a container; delegates to the store model.
     */
    @Override
    public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(String containerLink) {
        checkArgument(StringUtils.isNotEmpty(containerLink), "Argument 'containerLink' should not be null nor empty");
        return this.storeModel.openConnectionsAndInitCaches(containerLink);
    }
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
PartitionKey partitionKey,
String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE");
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
String pkParamName = "@pkValue";
parameters.add(new SqlParameter(pkParamName, pkValue));
queryStringBuilder.append(" c");
queryStringBuilder.append(partitionKeySelector);
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParamName);
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
    /**
     * Resolves the full set of feed ranges (one per physical partition) for a container,
     * retrying through an {@code InvalidPartitionExceptionRetryPolicy} so a stale name
     * cache is refreshed and retried transparently.
     */
    @Override
    public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            collectionLink,
            new HashMap<>());
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this,
            OperationType.Query,
            ResourceType.Document,
            collectionLink,
            null);
        invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> getFeedRangesInternal(request, collectionLink),
            invalidPartitionExceptionRetryPolicy);
    }

    /**
     * Resolves the collection, fetches all overlapping partition key ranges for the
     * whole key space, and maps them to feed ranges.
     *
     * @throws IllegalArgumentException when {@code collectionLink} is empty
     */
    private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
        logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
            request);
        return collectionObs.flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                throw new IllegalStateException("Collection cannot be null");
            }
            // query the cache across the entire partition key range space
            Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
                .tryGetOverlappingRangesAsync(
                    BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
            return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
        });
    }
private static List<FeedRange> toFeedRanges(
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v;
if (partitionKeyRangeList == null) {
request.forceNameCacheRefresh = true;
throw new InvalidPartitionException();
}
List<FeedRange> feedRanges = new ArrayList<>();
partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange)));
return feedRanges;
}
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
} |
Fixed | private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
int documentIdPrefixPosition = -1;
boolean needToEncodeIdInUriForGateway = request.getResourceType() == ResourceType.Document &&
(documentIdPrefixPosition = resourceName.indexOf("/docs/")) > 0 &&
this.getStoreProxy(request) == this.gatewayProxy;
if (needToEncodeIdInUriForGateway) {
String encodedResourceName = resourceName.substring(0, documentIdPrefixPosition + 6) +
Strings.encodeURIComponent(resourceName.substring(documentIdPrefixPosition + 6));
if (!resourceName.equals(encodedResourceName)) {
request.setResourceAddress(encodedResourceName);
}
}
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
} | if (needToEncodeIdInUriForGateway) { | private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
if (this.getStoreProxy(request) == this.gatewayProxy) {
this.gatewayProxy.prepareRequestForAuth(request, resourceName);
}
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// --- static, JVM-wide state shared by all client instances ---
// Per-process pseudo machine id used in diagnostics (random per JVM start).
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
// Count of currently-open clients in this JVM (incremented in ctor, used by diagnostics).
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
// Monotonic source of per-client ids.
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Full EPK range: [min inclusive, max exclusive) — covers every partition key range.
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
// Placeholder query text used only to construct a ParallelDocumentQueryExecutionContext.
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// --- immutable per-client configuration, fixed at construction ---
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
// True when masterKeyOrResourceToken is a resource token rather than a master key.
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// --- authentication state (exactly one mechanism is active; see the main ctor) ---
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
// Package-private so tests can inspect which auth mechanism was selected.
AuthorizationTokenType authorizationTokenType;
// --- mutable runtime state, wired up in init() ---
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
// Either the gateway proxy (GATEWAY mode) or a ServerStoreModel (DIRECT mode).
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Resource id/full-name -> (partition key, resource token) pairs, when permission-feed auth is used.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private ApiType apiType;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
// Optional wrapper applied to the HTTP client in init(); null when unused.
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final ClientTelemetryConfig clientTelemetryConfig;
/**
 * Creates a client that authenticates via a key credential, a master key / resource token,
 * a permission feed, or a custom {@link CosmosAuthorizationTokenResolver} — without an AAD
 * {@code TokenCredential} (delegates with {@code null} for that parameter).
 * The resolver is assigned after delegation so the chained constructor runs first.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
ClientTelemetryConfig clientTelemetryConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Same as the resolver-based constructor above, but additionally accepts an AAD
 * {@code TokenCredential}. Delegates all wiring to the permission-feed constructor,
 * then records the custom authorization token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
ClientTelemetryConfig clientTelemetryConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Constructor variant that supports resource-token authorization via a permission feed.
 * Delegates core wiring to the package-private constructor, then builds
 * {@code resourceTokensMap}: resource id/full-name -> list of (partition key, token) pairs.
 *
 * @throws IllegalArgumentException when a permission's resource link cannot be parsed,
 *         or when the feed yields no usable tokens
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             ClientTelemetryConfig clientTelemetryConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig);
    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // computeIfAbsent replaces the original get / null-check / put sequence.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember the first real resource token so it can be used as a fallback credential.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Core constructor: performs all field wiring that does not require network I/O
 * (the network-dependent part happens later in {@code init(...)}).
 * Selects exactly one auth mechanism, in priority order:
 * key credential > resource token > master key string > AAD token credential.
 * On any runtime failure it closes the partially-built client before rethrowing.
 */
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
ClientTelemetryConfig clientTelemetryConfig) {
// Diagnostics bookkeeping happens before the try so failures still show the attempt.
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Auth selection: the four branches are mutually exclusive; exactly one runs.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
// Plain master key string: wrap it in an AzureKeyCredential.
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
// NOTE(review): the scope literal below appears truncated in this snapshot
// (likely "://" + host + "/.default") — confirm against the upstream source.
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
// Default to direct-mode defaults when no policy is supplied.
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
// Session capturing stays on for SESSION consistency, or when explicitly overridden.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
// Release any partially-acquired resources (counters, monitors) before rethrowing.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/**
 * Returns the per-client diagnostics configuration assembled during construction.
 */
@Override
public DiagnosticsClientConfig getConfig() {
return diagnosticsClientConfig;
}
/**
 * Creates a fresh {@link CosmosDiagnostics} instance bound to this client's
 * diagnostics context.
 */
@Override
public CosmosDiagnostics createDiagnostics() {
return BridgeInternal.createCosmosDiagnostics(this);
}
/**
 * Creates the gateway configuration reader and derives the multi-write flag from the
 * latest database account. Must run after {@code globalEndpointManager.init()} so the
 * account snapshot is available; a {@code null} account means initialization failed.
 * NOTE(review): the two message literals below appear truncated in this snapshot
 * (the "More info: https:" URL tail was stripped) — confirm against the upstream source.
 */
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Pushes the freshly-initialized caches and configuration into the gateway store model.
 * Called from {@code init(...)} once the collection and partition-key-range caches exist.
 */
private void updateGatewayProxy() {
    // Cast once instead of repeating (RxGatewayStoreModel) on every call.
    RxGatewayStoreModel gatewayStoreModel = (RxGatewayStoreModel) this.gatewayProxy;
    gatewayStoreModel.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    gatewayStoreModel.setCollectionCache(this.collectionCache);
    gatewayStoreModel.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    gatewayStoreModel.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/**
 * Performs the network-dependent half of client initialization. The call order is
 * significant: gateway proxy -> endpoint manager -> gateway config reader ->
 * collection cache -> partition-key-range cache -> gateway proxy update -> telemetry ->
 * store model. On failure the client is closed before the exception propagates.
 *
 * @param metadataCachesSnapshot optional warm-start snapshot for the collection cache
 * @param httpClientInterceptor  optional wrapper applied to the gateway HTTP client
 */
public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
// Rebuild the HTTP client so the interceptor wraps a fresh instance.
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
// Warm-start: seed the collection cache from a previously-serialized snapshot.
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
// The gateway proxy was created before the caches existed; wire them in now.
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
UUID.randomUUID().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
userAgentContainer.getUserAgent(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/**
 * Serializes this client's collection cache into the given snapshot so a future
 * client can warm-start via {@code init(metadataCachesSnapshot, ...)}.
 */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Sets up DIRECT connection mode: builds the global address resolver and the
 * store-client factory, then creates the server store model. Must run after the
 * collection and partition-key-range caches exist (see {@code init(...)}).
 */
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager
);
this.createStoreModel(true);
}
/**
 * Adapts this client to {@link DatabaseAccountManagerInternal} for the global endpoint
 * manager. Each method of the anonymous adapter simply delegates back to this client.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
/**
 * Factory for the gateway store model; package-private so tests can substitute a stub.
 * Pure construction — no side effects on this client.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
/**
 * Builds the gateway HTTP client from this client's {@link ConnectionPolicy}.
 * When connection sharing is enabled the client is a JVM-wide shared instance;
 * otherwise a dedicated client is created and its config recorded in diagnostics.
 */
private HttpClient httpClient() {
    HttpClientConfig config = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
    if (!connectionSharingAcrossClientsEnabled) {
        // Dedicated client: record its settings for diagnostics output.
        diagnosticsClientConfig.withGatewayHttpClientConfig(config.toDiagnosticsString());
        return HttpClient.createFixed(config);
    }
    return SharedGatewayHttpClient.getOrCreateInstance(config, diagnosticsClientConfig);
}
/**
 * Creates the direct-mode store client and wraps it in a {@link ServerStoreModel}.
 * NOTE(review): the {@code subscribeRntbdStatus} parameter is unused in this body —
 * presumably a leftover hook; confirm before removing since callers pass a value.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations
);
this.storeModel = new ServerStoreModel(storeClient);
}
/**
 * Returns the account service endpoint this client was constructed with.
 */
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
/**
 * Returns the preferred (first) write endpoint currently known to the global
 * endpoint manager, or {@code null} when none has been resolved yet.
 */
@Override
public URI getWriteEndpoint() {
    for (URI endpoint : globalEndpointManager.getWriteEndpoints()) {
        return endpoint; // first entry of the ordered list
    }
    return null;
}
/**
 * Returns the preferred (first) read endpoint currently known to the global
 * endpoint manager, or {@code null} when none has been resolved yet.
 */
@Override
public URI getReadEndpoint() {
    for (URI endpoint : globalEndpointManager.getReadEndpoints()) {
        return endpoint; // first entry of the ordered list
    }
    return null;
}
/**
 * Returns the effective connection policy (caller-supplied, or direct-mode defaults).
 */
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
/**
 * Whether write operations return the resource payload in the response.
 */
@Override
public boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
/**
 * Returns the consistency level this client was configured with (may be {@code null}).
 */
@Override
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
/**
 * Returns the client telemetry component; initialized in {@code init(...)},
 * so this is {@code null} before initialization completes.
 */
@Override
public ClientTelemetry getClientTelemetry() {
return this.clientTelemetry;
}
/**
 * Creates a database, wrapping the work in a fresh per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Create-Database request. Serialization is timed explicitly so
 * the duration can be attached to the request's diagnostics. Synchronous validation
 * failures are converted to a failed {@link Mono} rather than thrown.
 *
 * @throws IllegalArgumentException (as a Mono error) when {@code database} is null
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Measure serialization time so it can be reported in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
// Diagnostics context only exists once the request is created; attach timings now.
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a database, wrapping the work in a fresh per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Delete-Database request. Synchronous validation failures
 * are converted to a failed {@link Mono} rather than thrown.
 *
 * @throws IllegalArgumentException (as a Mono error) when {@code databaseLink} is empty
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String databasePath = Utils.joinPath(databaseLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, databasePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a database, wrapping the work in a fresh per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Read-Database request. Synchronous validation failures
 * are converted to a failed {@link Mono} rather than thrown.
 *
 * @throws IllegalArgumentException (as a Mono error) when {@code databaseLink} is empty
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String databasePath = Utils.joinPath(databaseLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, databasePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all databases in the account as a paged feed.
 */
@Override
public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link to the feed/query link for the given child resource type.
 * Root-level types (Database, Offer) ignore the parent link and use their fixed roots.
 *
 * @throws IllegalArgumentException for resource types that have no query feed
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        case Database:            return Paths.DATABASES_ROOT;
        case DocumentCollection:  return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case Offer:               return Paths.OFFERS_ROOT;
        case User:                return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
        case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        case Permission:          return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:          return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:     return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:             return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        case Conflict:            return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
        default:                  throw new IllegalArgumentException("resource type not supported");
    }
}
/**
 * Extracts the operation-context/listener pair attached to query request options.
 *
 * @param options may be {@code null}
 * @return the attached tuple, or {@code null} when no options were supplied
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .getOperationContext(options);
}
/**
 * Extracts the operation-context/listener pair attached to point-operation options.
 *
 * @param options may be {@code null}
 * @return the attached tuple, or {@code null} when no options were supplied
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Entry point for typed queries against any resource feed. Resolves the feed link from
 * the parent resource, picks a correlation activity id (caller-supplied takes priority),
 * and wraps execution in an invalid-partition retry policy.
 *
 * @param <T> resource type the feed deserializes to
 */
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
UUID correlationActivityIdOfRequestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getCorrelationActivityId(options);
// Prefer the caller-provided correlation id; otherwise generate one per query.
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this,
getOperationContextAndListenerTuple(options));
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options));
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, correlationActivityId),
invalidPartitionExceptionRetryPolicy);
}
/**
 * Creates the query execution context and streams its pages, enriching each page with
 * query info (for SELECT VALUE projections) and, on the first page only, the query-plan
 * diagnostics. The first-page check uses an AtomicBoolean because pages may be emitted
 * from reactor threads.
 *
 * @param <T> resource type the feed deserializes to
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache);
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
// Only pipelined contexts carry query info; simple contexts leave it null.
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
return iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
// Attach plan diagnostics exactly once, to the first emitted page.
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
});
}
/**
 * Queries databases using raw query text; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDatabases(querySpec, options);
}
/**
 * Queries databases at the account root using a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
/**
 * Creates a collection, wrapping the work in a fresh per-request retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Create-Collection request. Serialization is timed for
 * diagnostics, and on success the response's session token is captured so subsequent
 * session-consistent requests can use it. Synchronous validation failures are
 * converted to a failed {@link Mono}.
 *
 * @throws IllegalArgumentException (as a Mono error) when the link or collection is missing
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
// Measure serialization time so it can be reported in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Capture the session token of the newly-created collection.
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces a collection, wrapping the work in a fresh per-request retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Issues the Replace (PUT) request for a document collection.
 * Records serialization timing diagnostics and, on success, stores the session
 * token for the replaced collection so later reads observe this write.
 *
 * @param collection          the collection definition to write; must be non-null and have a valid id
 * @param options             request options (may be null)
 * @param retryPolicyInstance retry policy; notified before the request is sent when non-null
 * @return Mono emitting the replaced collection, or an error for invalid input / service failure
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the JSON serialization so it can be surfaced in request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        // The diagnostics context only exists after the request is created, so attach afterwards.
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Resource can be null when content-response-on-write is disabled.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        // Synchronous failures (validation, serialization) are converted to an error Mono.
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a document collection identified by its link.
 * Delegates to the internal implementation under a session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Delete request for a document collection.
 *
 * @param collectionLink      link of the collection to delete; must be non-empty
 * @param options             request options (may be null)
 * @param retryPolicyInstance retry policy; notified before the request is sent when non-null
 * @return Mono emitting the deletion response, or an error for invalid input / service failure
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Synchronous failures (e.g. bad link) are converted to an error Mono.
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Sends a DELETE request through the store proxy after populating headers.
 * Marks the end time on the retry context when this is a retried attempt.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest).processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Sends the "delete all items by partition key" operation (a POST) through the
 * store proxy, updating the retry context's end time on retried attempts.
 */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Sends a GET (point read) request through the store proxy after populating headers.
 * Marks the end time on the retry context when this is a retried attempt.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/** Sends a feed-read (GET) request through the store proxy after populating headers. */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> this.getStoreProxy(populatedRequest).processMessage(populatedRequest));
}
/**
 * Sends a query (POST) request through the store proxy and captures the
 * session token from the response before emitting it.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel storeModel = this.getStoreProxy(populatedRequest);
            return storeModel.processMessage(populatedRequest)
                .map(response -> {
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
/**
 * Reads a document collection identified by its link.
 * Delegates to the internal implementation under a session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Read request for a document collection.
 *
 * @param collectionLink      link of the collection to read; must be non-empty
 * @param options             request options (may be null)
 * @param retryPolicyInstance retry policy; notified before the request is sent when non-null
 * @return Mono emitting the collection, or an error for invalid input / service failure
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Synchronous failures (e.g. bad link) are converted to an error Mono.
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the collections feed of a database.
 *
 * @throws IllegalArgumentException if {@code databaseLink} is null or empty
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedPath);
}
/** Queries collections of a database using a raw query string. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/** Queries collections of a database using a parameterized query spec. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(
        databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure arguments into a single JSON array literal.
 * {@link JsonSerializable} values use the SDK serializer; everything else
 * goes through the shared Jackson mapper.
 *
 * @throws IllegalArgumentException if an argument cannot be serialized to JSON
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    StringBuilder json = new StringBuilder("[");
    for (int index = 0; index < objectArray.size(); ++index) {
        if (index > 0) {
            json.append(",");
        }
        Object argument = objectArray.get(index);
        if (argument instanceof JsonSerializable) {
            json.append(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) argument));
        } else {
            try {
                json.append(mapper.writeValueAsString(argument));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return json.append("]").toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the HTTP request headers for an operation from client defaults and
 * per-request options. Client-level settings (multi-write, consistency,
 * content-response-on-write) are applied first; per-request options then
 * override or add to them.
 *
 * @param options       per-request options; may be null, in which case only client defaults apply
 * @param resourceType  resource being operated on (affects the PREFER minimal-response header)
 * @param operationType operation being performed (affects the PREFER minimal-response header)
 * @return a mutable map of header name to value
 * @throws IllegalArgumentException if a fixed offer is combined with autoscale settings
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    // No per-request options: only the client-level minimal-response preference can apply.
    if (options == null) {
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Custom headers first, so the well-known options below take precedence over them.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request content-response-on-write overrides the client default.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if(options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Trigger lists are sent as comma-separated header values.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Explicit numeric throughput wins over a named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties path: manual throughput and autoscale are mutually exclusive.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    if (options.getDedicatedGatewayRequestOptions() != null &&
        options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
        headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
            String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
    }
    return headers;
}
/** Exposes the session-token-reset retry policy factory used by this client. */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return resetSessionTokenRetryPolicy;
}
/**
 * Resolves the target collection from the cache, then attaches partition key
 * information for the given document to the request.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request)
        .map(collectionHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
            return request;
        });
}
/**
 * Attaches partition key information to the request once the supplied
 * collection lookup completes.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
        return request;
    });
}
/**
 * Determines the effective partition key for a request and stamps it on both
 * the request object and the x-ms-documentdb-partitionkey header.
 *
 * Resolution order:
 *  1. explicit PartitionKey.NONE in options -> the "none" key for the collection's definition
 *  2. explicit partition key in options
 *  3. collection has no partition key definition -> empty key
 *  4. extract the key from the document body (buffer or object form)
 *  5. otherwise the caller must supply one -> UnsupportedOperationException
 *
 * @param request             request to annotate
 * @param contentAsByteBuffer serialized document body, if available
 * @param objectDoc           document in object form, if available
 * @param options             request options carrying an explicit partition key, if any
 * @param collection          resolved target collection (source of the key definition)
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection is not partitioned.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Normalize the document into an InternalObjectNode so the key path can be read.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // rewind: the buffer may already have been read during serialization.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // Time the key extraction so it shows up in serialization diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Reads the partition key value(s) out of a document according to the
 * collection's partition key definition.
 *
 * HASH: single path; a missing value or an ObjectNode at the path maps to the
 * "none" partition key. MULTI_HASH: one value per defined path, combined into
 * a composite key.
 *
 * @param document               document to read the key path(s) from
 * @param partitionKeyDefinition the collection's partition key definition; null yields null
 * @return the extracted key, or null when the definition is null or the HASH path list is empty
 * @throws IllegalArgumentException for an unrecognized partition kind
 */
public static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    InternalObjectNode document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition != null) {
        switch (partitionKeyDefinition.getKind()) {
            case HASH:
                String path = partitionKeyDefinition.getPaths().iterator().next();
                List<String> parts = PathParser.getPathParts(path);
                if (parts.size() >= 1) {
                    Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts);
                    // Missing value, or a nested object at the key path, means "no key".
                    if (value == null || value.getClass() == ObjectNode.class) {
                        value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
                    }
                    if (value instanceof PartitionKeyInternal) {
                        return (PartitionKeyInternal) value;
                    } else {
                        return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
                    }
                }
                break;
            case MULTI_HASH:
                // Collect one value per path, in definition order, into a composite key.
                Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()];
                for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){
                    String partitionPath = partitionKeyDefinition.getPaths().get(pathIter);
                    List<String> partitionPathParts = PathParser.getPathParts(partitionPath);
                    partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts);
                }
                return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false);
            default:
                throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind());
        }
    }
    return null;
}
/**
 * Builds a document write request (Create/Upsert): serializes the document,
 * records serialization diagnostics, resolves the target collection, and
 * attaches partition key information.
 *
 * @param requestRetryPolicy           retry policy; notified before the request is sent when non-null
 * @param documentCollectionLink       link of the target collection; must be non-empty
 * @param document                     document payload; must be non-null
 * @param options                      request options (may be null)
 * @param disableAutomaticIdGeneration currently unused here; id handling happens elsewhere — TODO confirm
 * @param operationType                Create or Upsert
 * @return Mono emitting the fully-prepared request
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time the JSON serialization so it can be surfaced in request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    // The diagnostics context only exists after the request is created, so attach afterwards.
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds a transactional-batch request: wraps the pre-serialized batch body,
 * records serialization diagnostics, resolves the target collection, and
 * attaches the batch routing headers.
 *
 * @param requestRetryPolicy           retry policy; notified before the request is sent when non-null
 * @param documentCollectionLink       link of the target collection; must be non-empty
 * @param serverBatchRequest           the batch payload; must be non-null
 * @param options                      request options (may be null)
 * @param disableAutomaticIdGeneration currently unused here — TODO confirm
 * @return Mono emitting the fully-prepared request
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // The batch body is already serialized; only the UTF-8 wrap is timed here.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    // The diagnostics context only exists after the request is created, so attach afterwards.
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Attaches batch routing and batch control headers to a request.
 * Single-partition batches are routed by partition key; range batches by
 * partition key range identity.
 *
 * @param request            request to annotate
 * @param serverBatchRequest batch payload whose flags and routing info are copied
 * @param collection         resolved target collection (used for the NONE partition key)
 * @return the same request instance, for chaining
 * @throws UnsupportedOperationException for an unknown ServerBatchRequest subtype
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    // Batch control flags: atomicity and continue-on-error semantics come from the payload.
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/**
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers.
 * (NOTE(review): this Javadoc describes populateHeadersAsync, which is defined elsewhere in this file;
 * it appears detached from that method here and does not document the method that follows — verify placement.)
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
/**
 * Returns true when the request is a document/conflict feed or query that
 * carries an explicit feed range and therefore needs range filtering.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    boolean targetsDocumentOrConflict = request.getResourceType() == ResourceType.Document
        || request.getResourceType() == ResourceType.Conflict;
    if (!targetsDocumentOrConflict) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean isFeedOrQuery = operationType == OperationType.ReadFeed
        || operationType == OperationType.Query
        || operationType == OperationType.SqlQuery;
    return isFeedOrQuery && request.getFeedRange() != null;
}
/**
 * Adds the Authorization header to a service request when AAD token auth is
 * configured; other auth modes pass the request through unchanged.
 *
 * @throws IllegalArgumentException if {@code request} is null
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        // Key/resource-token auth is attached elsewhere; nothing to do here.
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * Adds the Authorization header to an HttpHeaders bag when AAD token auth is
 * configured; other auth modes pass the headers through unchanged.
 *
 * @throws IllegalArgumentException if {@code httpHeaders} is null
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/** Returns the authorization token type this client was configured with. */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return authorizationTokenType;
}
/**
 * Produces the authorization token for a request, trying the configured auth
 * mechanisms in fixed precedence order:
 *  1. a user-supplied token resolver,
 *  2. a master-key credential (HMAC signature),
 *  3. a plain resource token,
 *  4. the resource-token map (permission feed), with the first feed token used
 *     for DatabaseAccount-level requests.
 *
 * @param resourceName name/link of the resource being addressed
 * @param resourceType type of the resource being addressed
 * @param requestVerb  HTTP verb of the request
 * @param headers      request headers (participate in key signatures)
 * @param tokenType    requested token type (unused by the resolver paths here)
 * @param properties   extra properties passed through to a custom resolver; may be null
 * @return the authorization token to place on the request
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        // Custom resolver wins over every built-in mechanism.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token is used verbatim.
        return masterKeyOrResourceToken;
    } else {
        // Client construction guarantees one auth mechanism exists; only the token map remains.
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Maps an internal ResourceType to its public CosmosResourceType,
 * defaulting to SYSTEM when no mapping exists.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType resolved = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return resolved != null ? resolved : CosmosResourceType.SYSTEM;
}
/** Records the session token from a service response into the session container. */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Sends a create (POST) request through the store proxy after populating headers.
 * Marks the end time on the retry context when this is a retried attempt.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Sends an upsert (POST with the IS_UPSERT header) through the store proxy and
 * captures the session token from the response before emitting it.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> headers = populatedRequest.getHeaders();
            assert (headers != null);
            // The IS_UPSERT header is what distinguishes this from a plain create.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(response -> {
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
/**
 * Sends a replace (PUT) request through the store proxy after populating headers.
 * Marks the end time on the retry context when this is a retried attempt.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Sends a PATCH request through the store proxy after populating headers.
 * Marks the end time on the retry context when this is a retried attempt.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Creates a document. When no explicit partition key is supplied, the retry
 * policy is additionally wrapped to recover from stale partition key metadata.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    boolean partitionKeyNotSupplied = options == null || options.getPartitionKey() == null;
    if (partitionKeyNotSupplied) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectiveRetryPolicy),
        effectiveRetryPolicy);
}
/**
 * Builds and sends the Create request for a document.
 *
 * @param collectionLink               link of the target collection
 * @param document                     document payload
 * @param options                      request options (may be null)
 * @param disableAutomaticIdGeneration forwarded to request construction
 * @param requestRetryPolicy           retry policy applied to the request
 * @return Mono emitting the created document, or an error for invalid input / service failure
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
                                                                RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        // Synchronous failures (validation, serialization) are converted to an error Mono.
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a document. When no explicit partition key is supplied, the retry
 * policy is additionally wrapped to recover from stale partition key metadata.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    boolean partitionKeyNotSupplied = options == null || options.getPartitionKey() == null;
    if (partitionKeyNotSupplied) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectiveRetryPolicy),
        effectiveRetryPolicy);
}
// Builds and executes the Upsert request; structurally identical to
// createDocumentInternal except for OperationType.Upsert.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
// Replaces a document addressed by its link. Without an explicit partition key,
// the collection link is derived from the document link so the
// PartitionKeyMismatchRetryPolicy can refresh the right collection entry.
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
// Validates arguments, converts the caller-supplied object into a typed
// Document, then delegates to the (String, Document, ...) overload which does
// the actual wire work.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Serialize arbitrary POJOs/JsonSerializable into the internal Document model.
Document typedDocument = documentFromObject(document, mapper);
return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
} catch (Exception e) {
logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
// Replaces a document using its self link as the address.
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
// NOTE(review): getSelfLink() is the DOCUMENT's self link, yet it is passed as
// the collectionLink to PartitionKeyMismatchRetryPolicy — the String-link
// overload derives the collection via Utils.getCollectionName instead.
// Looks like an inconsistency; confirm intended behavior before changing.
String collectionLink = document.getSelfLink();
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
/**
 * Replaces the given document in place, addressing it by its own self link.
 * Delegates to the (String, Document, ...) overload for request construction.
 * Synchronous failures are surfaced as {@code Mono.error}.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fix: log message previously said "replacing a database" — this path
        // replaces a document (copy/paste from the database variant).
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
// Core Replace implementation: serializes the document (recording serialization
// timing into the request diagnostics), resolves the collection, attaches the
// partition key header, and issues the Replace over the store proxy.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Bracket the serialization with timestamps so item-serialization cost shows up in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
// NOTE: the lambda intentionally captures the outer `request` (the same object
// addPartitionKeyInformation mutates and emits as `req`).
return requestObs.flatMap(req -> replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
@Override
// Applies a set of partial-document (patch) operations to the document at the
// given link, with session-token-reset retry semantics.
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Core Patch implementation: serializes the patch operations into the request
// body (with serialization timing recorded in diagnostics), resolves the
// collection, attaches partition key info, and issues the Patch request.
private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// Patch has no document body to derive the PK from, hence the null content/object args.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
return requestObs.flatMap(req -> patch(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
@Override
// Deletes the document at the given link; delegates to the internal overload
// with no InternalObjectNode (partition key must come from options).
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy);
}
@Override
// Deletes a document, additionally supplying the item body so partition key
// information can be derived from it when absent from the options.
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy),
requestRetryPolicy);
}
// Builds and executes the Delete request: resolves the collection, attaches
// partition key info (from options or the supplied item body), then deletes.
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs);
return requestObs.flatMap(req -> this
.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
// Deletes all documents within a logical partition of the collection.
// NOTE(review): the `partitionKey` parameter is not forwarded to the internal
// method — presumably the partition key must also be present in `options` for
// addPartitionKeyInformation to pick it up; verify against callers.
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
requestRetryPolicy);
}
// Builds and executes the partition-level Delete (ResourceType.PartitionKey)
// against the collection path; partition key info is attached from options.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> this
.deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
// Point-reads the document at the given link with session-token-reset retries.
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and executes the Read request for a single document: resolves the
// collection, attaches partition key info from options, then performs the read.
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Reading a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Document, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> {
// `request` and `req` refer to the same mutated request object here.
return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
// Reads every document in the collection. A full scan is simply expressed as a
// "SELECT * FROM r" query, so this delegates straight to queryDocuments.
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, CosmosQueryRequestOptions options, Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String readAllQueryText = "SELECT * FROM r";
    return this.queryDocuments(collectionLink, readAllQueryText, options, classOfT);
}
@Override
// Efficiently point-reads many items in one logical operation: resolves the
// collection and its routing map, buckets the requested (id, partitionKey)
// pairs by owning partition key range, issues one generated SQL query per
// range, then merges all pages into a single FeedResponse (summing request
// charge and aggregating query metrics).
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
CosmosQueryRequestOptions options,
Class<T> klass) {
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
// Bucket each requested item identity by the partition key range that owns
// its effective partition key.
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap =
new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
itemIdentityList
.forEach(itemIdentity -> {
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// One generated SqlQuerySpec per range; the DUMMY query is replaced per-range downstream.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
collection.getPartitionKey());
return createReadManyQuery(
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
options,
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap))
.collectList()
.map(feedList -> {
// Merge all per-range pages into a single response: concatenate results,
// sum request charge, and union query metrics.
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponse(finalList, headers);
return frp;
});
});
}
);
}
// Produces one SQL query spec per partition key range for a readMany call.
// When the partition key path is exactly "/id", the simpler IN-clause query
// form is used; otherwise the (id AND pk) disjunction form is generated.
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    final boolean partitionKeyIsId = "[\"id\"]".equals(partitionKeySelector);
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, identities) -> {
        SqlQuerySpec querySpec = partitionKeyIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(identities, partitionKeySelector)
            : createReadManyQuerySpec(identities, partitionKeySelector);
        rangeQueryMap.put(range, querySpec);
    });
    return rangeQueryMap;
}
/**
 * Builds the readMany query for the special case where the partition key path
 * is "/id": {@code SELECT * FROM c WHERE c.id IN ( @param0, ... )}.
 * <p>
 * Entries whose id does not equal their partition key value are defensively
 * skipped. Fix: the original appended separators positionally, so a skipped
 * entry produced a trailing or doubled comma in the IN list (invalid SQL);
 * parameter names are now collected first and joined afterwards.
 *
 * @param idPartitionKeyPairList identities to read; id is expected to equal the pk value
 * @param partitionKeySelector   unused here (kept for signature parity with the general form)
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    List<SqlParameter> parameters = new ArrayList<>();
    List<String> parameterNames = new ArrayList<>();
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);
        String idValue = itemIdentity.getId();
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        if (!Objects.equals(idValue, pkValue)) {
            // Defensive: this query form is only valid when id == pk value.
            continue;
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        parameterNames.add(idParamName);
    }
    // Join after filtering so separators are always well-formed.
    String queryText = "SELECT * FROM c WHERE c.id IN ( "
        + String.join(", ", parameterNames)
        + " )";
    return new SqlQuerySpec(queryText, parameters);
}
// Builds the general readMany query: a disjunction of
// (c.id = @paramN AND c[pkPath] = @paramM) clauses, two parameters per item
// (pk at even indices, id at odd indices), matching the original output
// byte-for-byte.
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder queryText = new StringBuilder("SELECT * FROM c WHERE ( ");
    int index = 0;
    for (CosmosItemIdentity itemIdentity : itemIdentities) {
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(itemIdentity.getPartitionKey());
        String pkParamName = "@param" + (2 * index);
        String idParamName = "@param" + (2 * index + 1);
        parameters.add(new SqlParameter(pkParamName, pkValue));
        parameters.add(new SqlParameter(idParamName, itemIdentity.getId()));
        if (index > 0) {
            queryText.append(" OR ");
        }
        queryText.append("(")
            .append("c.id = ").append(idParamName)
            .append(" AND ").append(" c").append(partitionKeySelector)
            .append(" = ").append(pkParamName)
            .append(" )");
        index++;
    }
    queryText.append(" )");
    return new SqlQuerySpec(queryText.toString(), parameters);
}
// Converts the partition key definition's paths (e.g. /a, /b) into a JSON-path
// selector string of the form ["a"]["b"], escaping embedded double quotes
// exactly as the original stream pipeline did.
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder selector = new StringBuilder();
    for (String path : partitionKeyDefinition.getPaths()) {
        String withoutLeadingSlash = StringUtils.substring(path, 1);
        String escaped = StringUtils.replace(withoutLeadingSlash, "\"", "\\");
        selector.append("[\"").append(escaped).append("\"]");
    }
    return selector.toString();
}
// Creates and runs the per-range readMany execution contexts: each partition
// key range in rangeQueryMap gets its own pre-generated query, executed under
// a single activity id; results from all contexts are flattened into one Flux.
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(),
sqlQuery,
rangeQueryMap,
options,
collection.getResourceId(),
parentResourceLink,
activityId,
klass,
resourceTypeEnum);
return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
@Override
// Convenience overload: wraps the raw query text in a SqlQuerySpec and
// delegates to the spec-based overload.
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, CosmosQueryRequestOptions options, Class<T> classOfT) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryDocuments(collectionLink, querySpec, options, classOfT);
}
// Adapts this client to the IDocumentQueryClient interface consumed by the
// query pipeline. When an operation listener tuple is present, query execution
// is instrumented: the correlated activity id header is stamped and the
// request/response/exception listeners are invoked around the call.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
if (operationContextAndListenerTuple == null) {
// No instrumentation requested — plain query execution.
return RxDocumentClientImpl.this.query(request).single();
} else {
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
// Read-feed execution is not supported by this adapter.
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
return null;
}
};
}
@Override
// Executes a document query from a pre-built SqlQuerySpec; the query text is
// logged (via SqlQuerySpecLogger) before execution.
public <T> Flux<FeedResponse<T>> queryDocuments(
String collectionLink,
SqlQuerySpec querySpec,
CosmosQueryRequestOptions options,
Class<T> classOfT) {
SqlQuerySpecLogger.getInstance().logQuery(querySpec);
return createQuery(collectionLink, querySpec, options, classOfT, ResourceType.Document);
}
@Override
// Queries the change feed of the given collection by delegating to
// ChangeFeedQueryImpl, addressed by the collection's alt link and resource id.
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
final DocumentCollection collection,
final CosmosChangeFeedRequestOptions changeFeedOptions,
Class<T> classOfT) {
checkNotNull(collection, "Argument 'collection' must not be null.");
ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(
this,
ResourceType.Document,
classOfT,
collection.getAltLink(),
collection.getResourceId(),
changeFeedOptions);
return changeFeedQueryImpl.executeAsync();
}
@Override
// Reads all documents in one logical partition: resolves the collection and
// its routing map, computes which partition key range owns the given key, then
// runs a generated "scan this logical partition" query pinned to that range.
// Wrapped in InvalidPartitionExceptionRetryPolicy so collection-recreate races
// refresh the cache and retry.
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
CosmosQueryRequestOptions options,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
// Defensive copy so the caller's options object is not mutated below.
final CosmosQueryRequestOptions effectiveOptions =
ModelBridgeInternal.createQueryRequestOptions(options);
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
// Map the logical partition key to the single physical range that owns it.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId);
});
},
invalidPartitionExceptionRetryPolicy);
});
}
@Override
// Exposes the client's cached query execution plans keyed by query text.
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return this.queryPlanCache;
}
@Override
// Reads the partition key range feed of the given collection by appending the
// pkranges path segment to the collection link.
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
// Validates the inputs and constructs a stored-procedure request (create,
// upsert, …) targeting the collection's sprocs path.
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
}
// Validates the inputs and constructs a user-defined-function request
// targeting the collection's udfs path; mirrors getStoredProcedureRequest.
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
}
@Override
// Creates a stored procedure in the collection with session-token-reset retries.
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
// Builds and executes the Create request for a stored procedure; synchronous
// construction failures become Mono.error.
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Create);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
// Upserts a stored procedure in the collection with session-token-reset retries.
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
// Builds and executes the Upsert request for a stored procedure; mirrors
// createStoredProcedureInternal except for OperationType.Upsert.
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces an existing stored procedure under a session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
// Replace path for stored procedures: validates the resource, targets its self-link,
// and maps the raw response to a typed ResourceResponse.
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
RxDocumentClientImpl.validateResource(storedProcedure);
// Replace addresses the resource by its self-link rather than a collection link.
String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the stored procedure addressed by {@code storedProcedureLink}.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
// Delete path for stored procedures: validates the link, builds the Delete request,
// and maps the transport response to a typed ResourceResponse.
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the stored procedure addressed by {@code storedProcedureLink}.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
// Read path for stored procedures: validates the link, builds the Read request, and maps
// the transport response to a typed ResourceResponse.
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the stored-procedure feed of the given collection.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/**
 * Queries stored procedures with a raw query string; delegates to the SqlQuerySpec overload.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, spec, options);
}
/**
 * Queries stored procedures with a parameterized query spec via the shared query pipeline.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options,
        StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with no per-request options.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    List<Object> procedureParams) {
    // No RequestOptions supplied; defer to the options-aware overload.
    final RequestOptions noOptions = null;
    return this.executeStoredProcedure(storedProcedureLink, noOptions, procedureParams);
}
/**
 * Executes a stored procedure with the given options and parameters under a
 * session-token-resetting retry policy.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch against the given collection under a
 * session-token-resetting retry policy.
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options,
            retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
// Execute path for stored procedures: serializes the parameter list into the request body,
// attaches partition-key information, and captures the session token from the response.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
// An empty body is sent when there are no parameters; otherwise they are serialized as JSON.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda ignores 'req' and reuses the outer 'request' — this relies on
// addPartitionKeyInformation returning/mutating the same instance; confirm that invariant.
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
// Record the session token so subsequent session-consistent reads see this write.
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Batch execution path: builds the batch document request asynchronously, sends it, and
// parses the raw service response back into a CosmosBatchResponse.
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
DocumentClientRetryPolicy requestRetryPolicy,
boolean disableAutomaticIdGeneration) {
try {
logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
} catch (Exception ex) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
return Mono.error(ex);
}
}
/**
 * Creates a trigger in the given collection under a session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
// Create path for triggers: builds the Create request, lets the retry policy observe it
// pre-send, and maps the transport response to a typed ResourceResponse.
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a trigger in the given collection under a session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
// Upsert path for triggers: builds the Upsert request, lets the retry policy observe it
// pre-send, and maps the transport response to a typed ResourceResponse.
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Upsert);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds a trigger request for the given operation after validating its inputs.
 *
 * @param collectionLink link of the owning collection; must be non-empty
 * @param trigger        trigger resource to send; must be non-null and valid
 * @param options        optional per-request options
 * @param operationType  the operation (Create/Upsert/...) the request represents
 * @return the assembled service request
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String triggersPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger,
        triggersPath, trigger, headers, options);
}
/**
 * Replaces an existing trigger under a session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
// Replace path for triggers: validates the resource, targets its self-link, and maps the
// transport response to a typed ResourceResponse.
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
// Replace addresses the resource by its self-link rather than a collection link.
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the trigger addressed by {@code triggerLink}.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Delete path for triggers: validates the link, builds the Delete request, and maps the
// transport response to a typed ResourceResponse.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the trigger addressed by {@code triggerLink}.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Read path for triggers: validates the link, builds the Read request, and maps the
// transport response to a typed ResourceResponse.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the trigger feed of the given collection.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedPath);
}
/**
 * Queries triggers with a raw query string; delegates to the SqlQuerySpec overload.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, options);
}
/**
 * Queries triggers with a parameterized query spec via the shared query pipeline.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options,
        Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function in the given collection under a
 * session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
// Create path for UDFs: builds the Create request, lets the retry policy observe it
// pre-send, and maps the transport response to a typed ResourceResponse.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a user-defined function in the given collection under a
 * session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
// Upsert path for UDFs: builds the Upsert request, lets the retry policy observe it
// pre-send, and maps the transport response to a typed ResourceResponse.
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Upsert);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces an existing user-defined function under a session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
// Replace path for UDFs: validates the resource, targets its self-link, and maps the
// transport response to a typed ResourceResponse.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
// Replace addresses the resource by its self-link rather than a collection link.
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the user-defined function addressed by {@code udfLink}.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Delete path for UDFs: validates the link, builds the Delete request, and maps the
// transport response to a typed ResourceResponse.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user-defined function addressed by {@code udfLink}.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Read path for UDFs: validates the link, builds the Read request, and maps the transport
// response to a typed ResourceResponse.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user-defined-function feed of the given collection.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
/**
 * Queries user-defined functions with a raw query string; delegates to the SqlQuerySpec overload.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
    String query, CosmosQueryRequestOptions options) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, options);
}
/**
 * Queries user-defined functions with a parameterized query spec via the shared query pipeline.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options,
        UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/**
 * Reads the conflict addressed by {@code conflictLink}.
 */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Read path for conflicts: conflicts are partitioned resources, so partition-key
// information is resolved asynchronously before the request is dispatched.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda ignores 'req' and reuses the outer 'request' — this relies on
// addPartitionKeyInformation returning/mutating the same instance; confirm that invariant.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the conflict feed of the given collection.
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Conflict, Conflict.class, feedPath);
}
/**
 * Queries conflicts with a raw query string; delegates to the SqlQuerySpec overload.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, options);
}
/**
 * Queries conflicts with a parameterized query spec via the shared query pipeline.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options,
        Conflict.class, ResourceType.Conflict);
}
/**
 * Deletes the conflict addressed by {@code conflictLink}.
 */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Delete path for conflicts: conflicts are partitioned resources, so partition-key
// information is resolved asynchronously before the request is dispatched.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda ignores 'req' and reuses the outer 'request' — this relies on
// addPartitionKeyInformation returning/mutating the same instance; confirm that invariant.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Creates a user under the given database, running the operation under a
 * session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Create path for users: builds the Create request and maps the transport response to a
// typed ResourceResponse.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        // Consistency fix: every sibling *Internal method (e.g. upsertUserInternal,
        // createTriggerInternal) notifies the retry policy before dispatch; this path
        // previously skipped the onBeforeSendRequest hook.
        if (documentClientRetryPolicy != null) {
            documentClientRetryPolicy.onBeforeSendRequest(request);
        }
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        // Synchronous failures surface as an error signal instead of throwing to the caller.
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user under the given database, running the operation under a
 * session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Upsert path for users: builds the Upsert request, lets the retry policy observe it
// pre-send, and maps the transport response to a typed ResourceResponse.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds a user request for the given operation after validating its inputs.
 *
 * @param databaseLink  link of the owning database; must be non-empty
 * @param user          user resource to send; must be non-null and valid
 * @param options       optional per-request options
 * @param operationType the operation (Create/Upsert/...) the request represents
 * @return the assembled service request
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    String usersPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.User,
        usersPath, user, headers, options);
}
/**
 * Replaces an existing user under a session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
// Replace path for users: validates the resource, targets its self-link, and maps the
// transport response to a typed ResourceResponse.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
// Replace addresses the resource by its self-link rather than a database link.
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
// Give the retry policy a chance to inspect the outgoing request before dispatch.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
// Synchronous failures surface as an error signal instead of throwing to the caller.
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a {@link User} resource by link, retrying with a session-token-reset policy.
 */
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}

/**
 * Issues the Delete User request.
 *
 * @throws IllegalArgumentException (as a Mono error) when userLink is empty
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);

        String resourcePath = Utils.joinPath(userLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a {@link User} resource by link, retrying with a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}

/**
 * Issues the Read User request.
 *
 * @throws IllegalArgumentException (as a Mono error) when userLink is empty
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);

        String resourcePath = Utils.joinPath(userLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the user feed of a database as a paged {@link Flux}.
 *
 * @throws IllegalArgumentException when databaseLink is empty
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return readFeed(options, ResourceType.User, User.class,
        Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
/**
 * Queries users with a raw query string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
    return queryUsers(databaseLink, new SqlQuerySpec(query), options);
}
/**
 * Queries users with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           CosmosQueryRequestOptions options) {
    return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/**
 * Reads a {@link ClientEncryptionKey} by link, retrying with a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}

/**
 * Issues the Read ClientEncryptionKey request.
 *
 * @throws IllegalArgumentException (as a Mono error) when clientEncryptionKeyLink is empty
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);

        String resourcePath = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.ClientEncryptionKey, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a {@link ClientEncryptionKey} in the given database, retrying with a
 * session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                             ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}

/**
 * Builds and issues the Create ClientEncryptionKey request.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request =
            getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds the service request used by client-encryption-key operations.
 *
 * @throws IllegalArgumentException when databaseLink is empty or clientEncryptionKey is null
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                               OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);

    String keysFeedPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.ClientEncryptionKey, keysFeedPath, clientEncryptionKey, headers, options);
}
/**
 * Replaces an existing {@link ClientEncryptionKey}, retrying with a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}

/**
 * Issues the Replace ClientEncryptionKey request against the name-based link.
 *
 * @throws IllegalArgumentException (as a Mono error) when clientEncryptionKey is null
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);

        String resourcePath = Utils.joinPath(nameBasedLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey,
            headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the client-encryption-key feed of a database as a paged {@link Flux}.
 *
 * @throws IllegalArgumentException when databaseLink is empty
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
        Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}
/**
 * Queries client encryption keys with a raw query string; delegates to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query,
                                                                         CosmosQueryRequestOptions options) {
    return queryClientEncryptionKeys(databaseLink, new SqlQuerySpec(query), options);
}
/**
 * Queries client encryption keys with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a {@link Permission} under the given user, retrying with a
 * session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    // Fix: previously the retry wrapper was given a SECOND policy instance
    // (resetSessionTokenRetryPolicy.getRequestPolicy() was called twice), so the
    // policy driving the retries was not the one passed to the operation. Every
    // other wrapper in this class (e.g. upsertPermission, replaceUser) shares a
    // single instance; do the same here.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}

/**
 * Builds and issues the Create Permission request.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        // Synchronous failures are surfaced through the reactive chain.
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a {@link Permission} under the given user, retrying with a
 * session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}

/**
 * Builds and issues the Upsert Permission request.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds the service request used by permission create/upsert operations.
 *
 * @throws IllegalArgumentException when userLink is empty or permission is null
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);

    String permissionsFeedPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Permission, permissionsFeedPath, permission, headers, options);
}
/**
 * Replaces an existing {@link Permission}, retrying with a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}

/**
 * Issues the Replace Permission request against the permission's self link.
 *
 * @throws IllegalArgumentException (as a Mono error) when permission is null
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);

        String resourcePath = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.Permission, resourcePath, permission, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a {@link Permission} by link, retrying with a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}

/**
 * Issues the Delete Permission request.
 *
 * @throws IllegalArgumentException (as a Mono error) when permissionLink is empty
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);

        String resourcePath = Utils.joinPath(permissionLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.Permission, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a {@link Permission} by link, retrying with a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}

/**
 * Issues the Read Permission request.
 *
 * @throws IllegalArgumentException (as a Mono error) when permissionLink is empty
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);

        String resourcePath = Utils.joinPath(permissionLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.Permission, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the permission feed of a user as a paged {@link Flux}.
 *
 * @throws IllegalArgumentException when userLink is empty
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    return readFeed(options, ResourceType.Permission, Permission.class,
        Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
/**
 * Queries permissions with a raw query string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       CosmosQueryRequestOptions options) {
    return queryPermissions(userLink, new SqlQuerySpec(query), options);
}
/**
 * Queries permissions with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       CosmosQueryRequestOptions options) {
    return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
/**
 * Replaces an {@link Offer} (throughput) resource, retrying with a
 * session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}

/**
 * Issues the Replace Offer request against the offer's self link.
 * Offers carry no request options or extra headers.
 *
 * @throws IllegalArgumentException (as a Mono error) when offer is null
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);

        String resourcePath = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.Offer, resourcePath, offer, null, null);
        return this.replace(request, documentClientRetryPolicy)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads an {@link Offer} by link, retrying with a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}

/**
 * Issues the Read Offer request. Offers carry no headers or request options.
 *
 * @throws IllegalArgumentException (as a Mono error) when offerLink is empty
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);

        String resourcePath = Utils.joinPath(offerLink, null);
        // Null header map must be cast to disambiguate the create(...) overload.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.Offer, resourcePath, (HashMap<String, String>) null, null);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the account-level offer feed as a paged {@link Flux}.
 */
@Override
public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
    return readFeed(options, ResourceType.Offer, Offer.class,
        Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/**
 * Generic ReadFeed driver shared by readUsers/readPermissions/readOffers etc.
 * Builds one ReadFeed request per page (propagating the continuation token and
 * page size headers) and lets {@link Paginator} chain the pages into a Flux.
 *
 * @param options      query options; a default instance is substituted when null
 * @param resourceType resource type of the feed being read
 * @param klass        item type each page is deserialized into
 * @param resourceLink feed link the requests are addressed to
 */
private <T> Flux<FeedResponse<T>> readFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    if (options == null) {
        options = new CosmosQueryRequestOptions();
    }
    // -1 means "server default page size" when the caller did not set a max item count.
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;
    final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Invoked once per page; the continuation token comes from the previous response.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
        retryPolicy.onBeforeSendRequest(request);
        return request;
    };
    // Executes a single page request and converts the raw response into a typed FeedResponse.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
        .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(
                response,
                ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .getItemFactoryMethod(finalCosmosQueryRequestOptions, klass),
                klass)),
            retryPolicy);
    return Paginator.getPaginatedQueryResultAsObservable(
        options, createRequestFunc, executeFunc, maxPageSize);
}
/**
 * Queries offers with a raw query string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
    return queryOffers(new SqlQuerySpec(query), options);
}
/**
 * Queries offers with a parameterized query spec. Offers are account-scoped,
 * hence the null parent link.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/**
 * Fetches the {@link DatabaseAccount} from the service, retrying with a
 * session-token-reset policy.
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}

/**
 * Returns the database account snapshot most recently cached by the global
 * endpoint manager (no network call).
 */
@Override
public DatabaseAccount getLatestDatabaseAccount() {
    return this.globalEndpointManager.getLatestDatabaseAccount();
}

/**
 * Issues the Read DatabaseAccount request against the service root.
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        // The account lives at the service root, hence the empty resource address;
        // the null header map is cast to pick the right create(...) overload.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null, null);
        return this.read(request, documentClientRetryPolicy)
            .map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Returns the session container used for session-consistency tracking.
 */
public Object getSession() {
    return this.sessionContainer;
}
/**
 * Replaces the session container; the argument must be a {@link SessionContainer}.
 */
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
/**
 * Returns the collection metadata cache.
 */
@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}
/**
 * Returns the partition-key-range metadata cache.
 */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
/**
 * Reads the database account from a specific endpoint (used by the global
 * endpoint manager when probing regional endpoints). The request always goes
 * through the gateway proxy, and a successful response updates
 * {@code useMultipleWriteLocations} from the account's multi-write capability.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    // defer so a fresh request is built per subscription.
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                // Route to the endpoint being probed rather than the default endpoint.
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    // Side effect: enable multi-write only when both the client policy
                    // and the account capability allow it.
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Picks the transport for a request: certain requests must be routed through
 * the gateway even when the client connectivity mode is direct.
 *
 * @param request the request being routed
 * @return the gateway proxy or the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    if (request.UseGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();

    // These resources always go through the gateway, whatever the operation
    // (scripts only when not executing them; partition keys only on delete).
    if (resourceType == ResourceType.Offer
            || resourceType == ResourceType.ClientEncryptionKey
            || (resourceType.isScript() && operationType != OperationType.ExecuteJavaScript)
            || resourceType == ResourceType.PartitionKeyRange
            || (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete)) {
        return this.gatewayProxy;
    }

    switch (operationType) {
        case Create:
        case Upsert:
            return (resourceType == ResourceType.Database
                    || resourceType == ResourceType.User
                    || resourceType == ResourceType.DocumentCollection
                    || resourceType == ResourceType.Permission)
                ? this.gatewayProxy : this.storeModel;
        case Delete:
            return (resourceType == ResourceType.Database
                    || resourceType == ResourceType.User
                    || resourceType == ResourceType.DocumentCollection)
                ? this.gatewayProxy : this.storeModel;
        case Replace:
        case Read:
            return resourceType == ResourceType.DocumentCollection
                ? this.gatewayProxy : this.storeModel;
        default:
            // Cross-partition queries/read-feeds over collection children with no
            // partition-key target need gateway-side query planning.
            if ((operationType == OperationType.Query
                    || operationType == OperationType.SqlQuery
                    || operationType == OperationType.ReadFeed)
                    && Utils.isCollectionChild(request.getResourceType())
                    && request.getPartitionKeyRangeIdentity() == null
                    && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
            return this.storeModel;
    }
}
/**
 * Closes the client exactly once (idempotent via the {@code closed} flag) and
 * releases endpoint manager, store client factory, HTTP client, CPU monitor
 * registration and — if enabled — the throughput control store, in that order.
 */
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    // getAndSet guarantees the shutdown sequence runs at most once.
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        // Throughput control store only exists if it was ever enabled.
        if (this.throughputControlEnabled.get()) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
/**
 * Returns the JSON item deserializer used for typed item responses.
 */
@Override
public ItemDeserializer getItemDeserializer() {
    return this.itemDeserializer;
}
/**
 * Registers a throughput control group, lazily creating the shared
 * {@link ThroughputControlStore} on first use. Synchronized plus the CAS on
 * {@code throughputControlEnabled} ensures the store is built exactly once
 * before any group registration touches it.
 */
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) {
    checkNotNull(group, "Throughput control group can not be null");
    if (this.throughputControlEnabled.compareAndSet(false, true)) {
        this.throughputControlStore =
            new ThroughputControlStore(
                this.collectionCache,
                this.connectionPolicy.getConnectionMode(),
                this.partitionKeyRangeCache);
        this.storeModel.enableThroughputControl(throughputControlStore);
    }
    this.throughputControlStore.enableThroughputControlGroup(group);
}
/**
 * Proactively opens connections and warms metadata caches for a container.
 *
 * @throws IllegalArgumentException when containerLink is null or empty
 */
@Override
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(String containerLink) {
    checkArgument(StringUtils.isNotEmpty(containerLink), "Argument 'containerLink' should not be null nor empty");
    return this.storeModel.openConnectionsAndInitCaches(containerLink);
}
/**
 * Builds the parameterized query used to scan one logical partition:
 * {@code SELECT * FROM c WHERE c<selector> = @pkValue}.
 *
 * @param partitionKey         partition key whose value is bound as {@code @pkValue}
 * @param partitionKeySelector property selector appended after the root alias (e.g. {@code ["pk"]})
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    String pkParamName = "@pkValue";

    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));

    String queryText = "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Resolves the feed ranges (one per physical partition) of a container,
 * retrying on invalid-partition errors so a stale name cache is refreshed.
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
    InvalidPartitionExceptionRetryPolicy retryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        collectionLink,
        new HashMap<>());

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    retryPolicy.onBeforeSendRequest(request);

    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getFeedRangesInternal(request, collectionLink),
        retryPolicy);
}
/**
 * Resolves the collection, then looks up all partition key ranges overlapping
 * the full key space and maps them to feed ranges.
 *
 * @throws IllegalArgumentException when collectionLink is empty
 * @throws IllegalStateException    (in the chain) when the collection cannot be resolved
 */
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
    logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
        request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            throw new IllegalStateException("Collection cannot be null");
        }
        // Full-range lookup (min..max effective partition key) returns every range.
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
        return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
    });
}
/**
 * Converts a partition-key-range lookup result into feed ranges. A null list
 * means the name cache is stale: flag a refresh on the request and throw so
 * the retry policy can re-resolve.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>();
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}

/**
 * Wraps a partition key range as an EPK-based feed range.
 */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// ---- process-wide state shared across all client instances ----
// Random per-process machine id reported in diagnostics.
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
// Count of currently open clients (incremented in ctor, decremented on close — see usages).
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
// Monotonic generator for per-client ids used in logging/diagnostics.
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Full EPK range [min, max) covering every physical partition.
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
    PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
    PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
// Placeholder query text; never sent to the service (see comment in the literal).
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
    "ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);

// ---- authentication / credentials ----
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
// True when masterKeyOrResourceToken holds a resource token rather than a master key.
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// Non-final: the core ctor may wrap a raw master key into an AzureKeyCredential.
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;

// ---- session / caches / transport (wired up in init()) ----
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
// Either the gateway proxy (GATEWAY mode) or a ServerStoreModel (direct mode).
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Resource id/name -> (partition key, resource token) pairs, built from the permission feed.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private ApiType apiType;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final ClientTelemetryConfig clientTelemetryConfig;
/**
 * Public overload taking a {@link CosmosAuthorizationTokenResolver} but no
 * {@link TokenCredential}; delegates to the permission-feed constructor and then
 * records the resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType,
                            ClientTelemetryConfig clientTelemetryConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        permissionFeed,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        null,                 // no TokenCredential in this overload
        sessionCapturingOverride,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Public overload taking both a {@link CosmosAuthorizationTokenResolver} and a
 * {@link TokenCredential}; delegates to the permission-feed constructor and then
 * records the resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            TokenCredential tokenCredential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType,
                            ClientTelemetryConfig clientTelemetryConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        permissionFeed,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverride,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Permission-feed constructor: delegates to the core constructor, then builds
 * {@code resourceTokensMap} (resource id/name -> partition-key/token pairs) from
 * the supplied permissions.
 *
 * @throws IllegalArgumentException if a permission's resource link is empty or
 *         unparsable, or if the feed yields no usable tokens.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             ClientTelemetryConfig clientTelemetryConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig);
    if (permissionFeed != null && permissionFeed.size() > 0) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            // Validate the resource link is non-trivial before parsing it.
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length <= 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // Group tokens per resource id/name; create the bucket lazily.
            partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
            if (partitionKeyAndResourceTokenPairs == null) {
                partitionKeyAndResourceTokenPairs = new ArrayList<>();
                this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
            }
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if(this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember the first real resource token for later authorization fallbacks.
        String firstToken = permissionFeed.get(0).getToken();
        if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Core constructor shared by all overloads. Selects the authentication mode
 * (key credential, master key, resource token, or AAD token credential), wires
 * connection policy, session container, diagnostics config, the gateway HTTP
 * client and the retry policy. Network-heavy initialization is deferred to
 * {@link #init}.
 *
 * <p>On any runtime failure the partially constructed client is closed before
 * the exception is rethrown, so counters and shared resources are released.
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     AzureKeyCredential credential,
                     TokenCredential tokenCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled,
                     boolean contentResponseOnWriteEnabled,
                     CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                     ApiType apiType,
                     ClientTelemetryConfig clientTelemetryConfig) {
    activeClientsCnt.incrementAndGet();
    this.clientId = clientIdGenerator.incrementAndGet();
    this.diagnosticsClientConfig = new DiagnosticsClientConfig();
    this.diagnosticsClientConfig.withClientId(this.clientId);
    this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
    this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
    this.diagnosticsClientConfig.withConsistency(consistencyLevel);
    this.throughputControlEnabled = new AtomicBoolean(false);
    logger.info(
        "Initializing DocumentClient [{}] with"
            + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
        this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
    try {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        this.configs = configs;
        this.masterKeyOrResourceToken = masterKeyOrResourceToken;
        this.serviceEndpoint = serviceEndpoint;
        this.credential = credential;
        this.tokenCredential = tokenCredential;
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        this.authorizationTokenType = AuthorizationTokenType.Invalid;
        if (this.credential != null) {
            // Explicit key credential wins over any other auth input.
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.authorizationTokenProvider = null;
            hasAuthKeyResourceToken = true;
            this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
        } else if (masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            // Raw master key: wrap it in an AzureKeyCredential.
            this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = null;
            if (tokenCredential != null) {
                // FIX: the AAD scope literal was truncated (unterminated string, would not
                // compile). The scope is "<scheme>://<host>/.default" per the AAD
                // resource-scope convention.
                this.tokenCredentialScopes = new String[] {
                    serviceEndpoint.getScheme() + "://" + serviceEndpoint.getHost() + "/.default"
                };
                this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                    .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                this.authorizationTokenType = AuthorizationTokenType.AadToken;
            }
        }
        if (connectionPolicy != null) {
            this.connectionPolicy = connectionPolicy;
        } else {
            // No policy supplied: fall back to default direct-mode configuration.
            this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        }
        this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
        this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
        this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
        this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
        this.diagnosticsClientConfig.withMachineId(tempMachineId);
        // Session capturing is only needed for SESSION consistency unless explicitly overridden.
        boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
        this.consistencyLevel = consistencyLevel;
        this.userAgentContainer = new UserAgentContainer();
        String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
        if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
            userAgentContainer.setSuffix(userAgentSuffix);
        }
        this.httpClientInterceptor = null;
        this.reactorHttpClient = httpClient();
        this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, configs);
        this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
        this.resetSessionTokenRetryPolicy = retryPolicy;
        CpuMemoryMonitor.register(this);
        this.queryPlanCache = new ConcurrentHashMap<>();
        this.apiType = apiType;
        this.clientTelemetryConfig = clientTelemetryConfig;
    } catch (RuntimeException e) {
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
/** Returns the per-client diagnostics configuration built during construction. */
@Override
public DiagnosticsClientConfig getConfig() {
    return diagnosticsClientConfig;
}
/** Creates a fresh {@link CosmosDiagnostics} instance bound to this client. */
@Override
public CosmosDiagnostics createDiagnostics() {
    return BridgeInternal.createCosmosDiagnostics(this);
}
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Pushes the caches and settings resolved in {@link #init} into the already
 * created gateway proxy (which was constructed before those caches existed).
 */
private void updateGatewayProxy() {
    (this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    (this.gatewayProxy).setCollectionCache(this.collectionCache);
    (this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    (this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/**
 * Second-phase initialization: builds the gateway proxy, endpoint manager,
 * collection/partition-key-range caches, client telemetry and the store model.
 * The ordering here is deliberate — each step feeds the next — so do not reorder.
 *
 * @param metadataCachesSnapshot optional warm-start snapshot for the collection cache
 * @param httpClientInterceptor  optional wrapper applied to the gateway HTTP client (e.g. for fault injection)
 */
public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
    try {
        this.httpClientInterceptor = httpClientInterceptor;
        if (httpClientInterceptor != null) {
            // Rebuild the HTTP client so the interceptor can wrap it.
            this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
        }
        this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
            this.consistencyLevel,
            this.queryCompatibilityMode,
            this.userAgentContainer,
            this.globalEndpointManager,
            this.reactorHttpClient,
            this.apiType);
        this.globalEndpointManager.init();
        this.initializeGatewayConfigurationReader();
        if (metadataCachesSnapshot != null) {
            // Warm start: seed the collection cache from the snapshot.
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy,
                metadataCachesSnapshot.getCollectionInfoByNameCache(),
                metadataCachesSnapshot.getCollectionInfoByIdCache()
            );
        } else {
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy);
        }
        // Replace the plain retry policy with one that can reset session tokens.
        this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
        this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
            collectionCache);
        // Back-fill the gateway proxy with the caches created above.
        updateGatewayProxy();
        clientTelemetry = new ClientTelemetry(
            this,
            null,
            UUID.randomUUID().toString(),
            ManagementFactory.getRuntimeMXBean().getName(),
            userAgentContainer.getUserAgent(),
            connectionPolicy.getConnectionMode(),
            globalEndpointManager.getLatestDatabaseAccount().getId(),
            null,
            null,
            this.configs,
            this.clientTelemetryConfig,
            this,
            this.connectionPolicy.getPreferredRegions());
        clientTelemetry.init();
        if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
            this.storeModel = this.gatewayProxy;
        } else {
            this.initializeDirectConnectivity();
        }
        this.retryPolicy.setRxCollectionCache(this.collectionCache);
    } catch (Exception e) {
        // Release partially acquired resources before propagating.
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
/** Serializes the current collection cache into {@code state} for warm-starting another client. */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Sets up direct (TCP) connectivity: the global address resolver, the store
 * client factory, and finally the server store model.
 */
private void initializeDirectConnectivity() {
    this.addressResolver = new GlobalAddressResolver(this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        null,
        this.connectionPolicy,
        this.apiType);
    this.storeClientFactory = new StoreClientFactory(
        this.addressResolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled,
        this.clientTelemetry,
        this.globalEndpointManager
    );
    this.createStoreModel(true);
}
/**
 * Adapts this client to {@link DatabaseAccountManagerInternal} so the
 * {@link GlobalEndpointManager} can query endpoints and account info through it.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }
        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
/**
 * Factory for the gateway store model; package-private so tests can override it
 * to inject a fake gateway.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient,
                                         ApiType apiType) {
    return new RxGatewayStoreModel(
        this,
        sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient,
        apiType);
}
/**
 * Builds the gateway HTTP client from the connection policy. When cross-client
 * sharing is enabled a process-wide shared instance is used; otherwise a
 * dedicated client is created and its config recorded in diagnostics.
 */
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
    if (connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
    } else {
        diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
        return HttpClient.createFixed(httpClientConfig);
    }
}
/**
 * Creates the direct-mode store client and wraps it in a {@link ServerStoreModel}.
 * NOTE(review): the {@code subscribeRntbdStatus} flag is currently unused in this body.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        this.useMultipleWriteLocations
    );
    this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the account endpoint this client was created with. */
@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}
/** Returns the first available write-region endpoint, or {@code null} if none resolved yet. */
@Override
public URI getWriteEndpoint() {
    return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null);
}
/** Returns the first available read-region endpoint, or {@code null} if none resolved yet. */
@Override
public URI getReadEndpoint() {
    return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null);
}
/** Returns the effective connection policy (supplied or defaulted in the constructor). */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
/** Whether write operations return the resource payload in the response. */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return contentResponseOnWriteEnabled;
}
/** Returns the client-level consistency level (may be {@code null} to use the account default). */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return consistencyLevel;
}
/** Returns the telemetry collector created in {@link #init}; null before init completes. */
@Override
public ClientTelemetry getClientTelemetry() {
    return this.clientTelemetry;
}
/** Creates a database, wrapping the call in a fresh retry policy instance. */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the create-database request, timing the JSON serialization
 * and recording it in the request's diagnostics.
 *
 * @throws IllegalArgumentException (as an error Mono) when {@code database} is null
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
        // Measure serialization cost for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Synchronous validation/serialization failures surface as an error Mono.
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Deletes a database, wrapping the call in a fresh retry policy instance. */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the delete-database request.
 *
 * @throws IllegalArgumentException (as an error Mono) when {@code databaseLink} is empty
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads a database, wrapping the call in a fresh retry policy instance. */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the read-database request.
 *
 * @throws IllegalArgumentException (as an error Mono) when {@code databaseLink} is empty
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all databases in the account as a paged feed. */
@Override
public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
    return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Resolves the feed link used to query resources of the given type: databases
 * and offers are account-rooted, every other supported type is a path segment
 * appended to {@code parentResourceLink}.
 *
 * @throws IllegalArgumentException for resource types that have no queryable feed
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        // Account-rooted feeds — the parent link is ignored.
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        // Feeds scoped under the parent resource.
        case DocumentCollection:
            return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:
            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case User:
            return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
        case ClientEncryptionKey:
            return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        case Permission:
            return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:
            return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:
            return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:
            return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction:
            return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        case Conflict:
            return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
}
/** Null-safe accessor for the operation context/listener pair carried by query options. */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .getOperationContext(options);
}
/** Null-safe accessor for the operation context/listener pair carried by request options. */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Entry point for feed queries: resolves the query link, picks (or generates) a
 * correlation activity id, and runs {@link #createQueryInternal} under an
 * {@link InvalidPartitionExceptionRetryPolicy} so stale-collection errors are retried.
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    // Honor a caller-supplied correlation id; otherwise generate one for this query.
    UUID correlationActivityIdOfRequestOptions = ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .getCorrelationActivityId(options);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : Utils.randomUUID();
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this,
        getOperationContextAndListenerTuple(options));
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options));
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> createQueryInternal(
            resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, correlationActivityId),
        invalidPartitionExceptionRetryPolicy);
}
/**
 * Creates the query execution context and streams its pages, attaching query-plan
 * info to pages when available and query-plan diagnostics to the first page only.
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId) {
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache);
    // Tracks whether we are emitting the first page (plan diagnostics attach once).
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        // Query info is only available for pipelined (cross-partition) executions.
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }
        QueryInfo finalQueryInfo = queryInfo;
        return iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    // Attach plan diagnostics to exactly the first emitted page.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
    });
}
/** Convenience overload: wraps the raw query text into a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
    return queryDatabases(new SqlQuerySpec(query), options);
}
/** Queries databases at the account root. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
/** Creates a collection under the given database, wrapped in a fresh retry policy instance. */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the create-collection request, recording serialization
 * diagnostics, and on success stores the returned session token so subsequent
 * session reads observe this write.
 *
 * @throws IllegalArgumentException (as an error Mono) on empty link or null collection
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                            DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Measure serialization cost for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Capture the session token of the newly created collection.
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Replaces a collection definition, wrapped in a fresh retry policy instance. */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the replace-collection request, recording serialization
 * diagnostics; on success (and only when a resource body is returned) updates
 * the session token for the collection.
 *
 * @throws IllegalArgumentException (as an error Mono) when {@code collection} is null
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Measure serialization cost for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Response body may be suppressed; only then is there a token to capture.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a collection, retrying per the session-token-reset policy.
 *
 * @param collectionLink self link of the collection to delete
 * @param options        optional request options (may be null)
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the DELETE request for a collection.
 *
 * @param collectionLink      self link of the collection; must be non-empty
 * @param options             optional request options (may be null)
 * @param retryPolicyInstance notified before the request is sent (may be null)
 * @return a Mono emitting the delete response, or erroring on invalid input
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
// Surface synchronous failures through the reactive chain instead of throwing.
logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Populates headers and dispatches a DELETE to the store proxy, updating the
 * retry context end time when this call is itself a retry.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel proxy = getStoreProxy(populated);
            return proxy.processMessage(populated, operationContextAndListenerTuple);
        });
}
/**
 * Dispatches the server-side "delete all items by partition key" operation,
 * which is modeled as a POST, updating the retry context end time on retries.
 */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populated -> {
            RxStoreModel proxy = this.getStoreProxy(populated);
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populated, operationContextAndListenerTuple);
        });
}
/**
 * Populates headers and dispatches a GET to the store proxy, updating the
 * retry context end time when this call is itself a retry.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel proxy = getStoreProxy(populated);
            return proxy.processMessage(populated);
        });
}
/** Dispatches a feed read (GET); no retry-context bookkeeping is done here. */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populated -> {
            RxStoreModel proxy = getStoreProxy(populated);
            return proxy.processMessage(populated);
        });
}
/**
 * Dispatches a query (POST) and captures the response session token so
 * session consistency observes the result.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populated -> getStoreProxy(populated)
            .processMessage(populated)
            .map(response -> {
                captureSessionToken(populated, response);
                return response;
            }));
}
/**
 * Reads a collection, retrying per the session-token-reset policy.
 *
 * @param collectionLink self link of the collection to read
 * @param options        optional request options (may be null)
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the GET request for a single collection.
 *
 * @param collectionLink      self link of the collection; must be non-empty
 * @param options             optional request options (may be null)
 * @param retryPolicyInstance notified before the request is sent (may be null)
 * @return a Mono emitting the collection, or erroring on invalid input
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
// Surface synchronous failures through the reactive chain instead of throwing.
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads all collections under a database as a paged feed.
 *
 * @param databaseLink link of the owning database; must be non-empty
 * @param options      query options controlling paging
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedPath);
}
/** Queries collections with a raw query string by wrapping it in a {@code SqlQuerySpec}. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/** Queries collections with a parameterized {@code SqlQuerySpec}. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(
        databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array literal, e.g. {@code [1,"a"]}.
 * {@code JsonSerializable} values use the model bridge; everything else goes through
 * the shared Jackson mapper.
 *
 * @throws IllegalArgumentException if a parameter cannot be serialized to JSON
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    StringBuilder joined = new StringBuilder("[");
    for (int i = 0; i < objectArray.size(); ++i) {
        if (i > 0) {
            joined.append(',');
        }
        Object param = objectArray.get(i);
        if (param instanceof JsonSerializable) {
            joined.append(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param));
        } else {
            try {
                joined.append(mapper.writeValueAsString(param));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return joined.append(']').toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Assembles the HTTP headers for a request from client defaults and per-request options.
 * <p>
 * Precedence: client-level settings are written first, then per-request options
 * overwrite them (e.g. consistency level), so the order of the puts below matters.
 *
 * @param options       per-request options; may be null, in which case only client
 *                      defaults apply
 * @param resourceType  the target resource type (used for the minimal-content preference)
 * @param operationType the operation (used for the minimal-content preference)
 * @return a mutable map of header name to value
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
Map<String, String> headers = new HashMap<>();
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
if (options == null) {
// Only the client-wide content-response preference applies without options.
if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
return headers;
}
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
// Per-request override of the client-wide content-response-on-write setting.
boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
if (options.isContentResponseOnWriteEnabled() != null) {
contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
}
if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
if (options.getIfMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
}
if(options.getIfNoneMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
}
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Throughput: an explicit offer throughput wins over an offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
if (options.getOfferThroughput() == null) {
if (options.getThroughputProperties() != null) {
Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
if (offerAutoscaleSettings != null) {
autoscaleAutoUpgradeProperties
= offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
}
// Fixed (manual) throughput and autoscale settings are mutually exclusive.
if (offer.hasOfferThroughput() &&
(offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
autoscaleAutoUpgradeProperties != null &&
autoscaleAutoUpgradeProperties
.getAutoscaleThroughputProperties()
.getIncrementPercent() >= 0)) {
throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
+ "fixed offer");
}
if (offer.hasOfferThroughput()) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
} else if (offer.getOfferAutoScaleSettings() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
}
}
}
if (options.isQuotaInfoEnabled()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
if (options.getDedicatedGatewayRequestOptions() != null &&
options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
}
return headers;
}
/** Returns the retry-policy factory that resets session tokens on retry. */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return resetSessionTokenRetryPolicy;
}
/**
 * Resolves the target collection from the cache, then stamps the partition-key
 * header onto the request via the synchronous overload.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request)
        .map(holder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
            return request;
        });
}
/**
 * Stamps the partition-key header onto the request once the supplied collection
 * lookup resolves.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
/**
 * Resolves the effective partition key for a request and writes it into both the
 * request context and the x-ms-documentdb-partitionkey header.
 * <p>
 * Resolution order: explicit {@code PartitionKey.NONE} option, explicit option value,
 * collection with no partition-key definition (empty PK), otherwise extract it from
 * the document body. If none of those apply the operation cannot proceed.
 *
 * @param request             request to stamp; mutated in place
 * @param contentAsByteBuffer serialized document body, if available (rewound before parsing)
 * @param objectDoc           the document object, if available
 * @param options             request options possibly carrying an explicit partition key
 * @param collection          the resolved target collection
 * @throws UnsupportedOperationException if no partition key value can be determined
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Migrated collections without a partition key definition use the empty key.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (objectDoc instanceof ObjectNode) {
internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
} else if (contentAsByteBuffer != null) {
// The buffer may already have been read for serialization; rewind before parsing.
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
// Time the PK extraction for the serialization diagnostics context.
Instant serializationStartTime = Instant.now();
partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Extracts the partition key value(s) from a document per the collection's
 * partition key definition.
 * <p>
 * HASH uses the single defined path; a missing value or an ObjectNode at the path
 * maps to the "none" partition key. MULTI_HASH (hierarchical keys) collects one
 * value per defined path.
 *
 * @param document               parsed document to read values from
 * @param partitionKeyDefinition the collection's partition key definition; may be null
 * @return the extracted key, or null if {@code partitionKeyDefinition} is null or
 *         a HASH definition has no path parts
 * @throws IllegalArgumentException for an unrecognized partition kind
 */
public static PartitionKeyInternal extractPartitionKeyValueFromDocument(
InternalObjectNode document,
PartitionKeyDefinition partitionKeyDefinition) {
if (partitionKeyDefinition != null) {
switch (partitionKeyDefinition.getKind()) {
case HASH:
String path = partitionKeyDefinition.getPaths().iterator().next();
List<String> parts = PathParser.getPathParts(path);
if (parts.size() >= 1) {
Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts);
if (value == null || value.getClass() == ObjectNode.class) {
// Absent value (or an object where a scalar is expected) maps to the "none" key.
value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
}
if (value instanceof PartitionKeyInternal) {
return (PartitionKeyInternal) value;
} else {
return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
}
}
break;
case MULTI_HASH:
// Hierarchical partition key: one value per defined path, in definition order.
Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()];
for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){
String partitionPath = partitionKeyDefinition.getPaths().get(pathIter);
List<String> partitionPathParts = PathParser.getPathParts(partitionPath);
partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts);
}
return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false);
default:
throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind());
}
}
return null;
}
/**
 * Builds the service request for a document Create/Upsert, including serialization
 * diagnostics and partition-key resolution.
 *
 * @param requestRetryPolicy           notified before the request is sent (may be null)
 * @param documentCollectionLink       target collection link; must be non-empty
 * @param document                     the document payload; must not be null
 * @param options                      optional request options
 * @param disableAutomaticIdGeneration unused here; id handling happens upstream —
 *                                     TODO confirm it is intentionally ignored
 * @param operationType                Create or Upsert
 * @return a Mono emitting the fully-prepared request (PK header stamped)
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Time the payload serialization for the diagnostics context below.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.Document, path, requestHeaders, options, content);
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch, including serialization
 * diagnostics and batch headers.
 *
 * @param requestRetryPolicy           notified before the request is sent (may be null)
 * @param documentCollectionLink       target collection link; must be non-empty
 * @param serverBatchRequest           the pre-serialized batch body; must not be null
 * @param options                      optional request options
 * @param disableAutomaticIdGeneration unused here — TODO confirm it is intentionally ignored
 * @return a Mono emitting the fully-prepared batch request
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
// The batch body is already serialized; only UTF-8 encoding is timed here.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Batch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
return request;
});
}
/**
 * Stamps batch-specific routing and behavior headers onto a request.
 * <p>
 * Single-partition-key batches route by partition key value; partition-key-range
 * batches route by range identity. Any other batch type is rejected.
 *
 * @param request            request to mutate and return
 * @param serverBatchRequest the batch describing routing and atomicity
 * @param collection         resolved target collection (for the "none" PK definition)
 * @return the same request instance, with batch headers applied
 * @throws UnsupportedOperationException for an unknown batch request type
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
return request;
}
/**
* NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
* @param request request to populate headers to
* @param httpMethod http method
* @return Mono, which on subscription will populate the headers in the request passed in the argument.
*/
/**
 * Decides whether a request must be filtered to a feed range: only Document and
 * Conflict feed/query operations that carry an explicit feed range qualify.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }
    switch (request.getOperationType()) {
        case ReadFeed:
        case Query:
        case SqlQuery:
            // Only feed/query operations can be scoped to a feed range.
            return request.getFeedRange() != null;
        default:
            return false;
    }
}
/**
 * Adds the Authorization header to a service request. Only AAD tokens are
 * resolved asynchronously here; other auth types are applied elsewhere, so the
 * request passes through unchanged.
 *
 * @throws IllegalArgumentException if {@code request} is null
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * Adds the Authorization header to a raw header set. Only AAD tokens are
 * resolved asynchronously here; otherwise the headers pass through unchanged.
 *
 * @throws IllegalArgumentException if {@code httpHeaders} is null
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/** Returns the authorization token type this client was configured with. */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return authorizationTokenType;
}
/**
 * Produces the authorization token for a request, trying the configured auth
 * mechanisms in order: a custom token resolver, a key credential, a single
 * resource token, and finally the per-resource token map.
 *
 * @param resourceName resource address or id the token must cover
 * @param resourceType type of the target resource
 * @param requestVerb  HTTP verb of the request
 * @param headers      request headers (used for key-signature generation)
 * @param tokenType    requested token type — NOTE(review): currently unused; confirm intended
 * @param properties   caller-supplied properties forwarded to a custom resolver
 * @return the authorization token to attach to the request
 */
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token was supplied directly; use it verbatim.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Account reads are authorized with the first token from the permission feed.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
/**
 * Maps a wire-level {@code ResourceType} to the public {@code CosmosResourceType};
 * unknown values are surfaced as SYSTEM.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped == null ? CosmosResourceType.SYSTEM : mapped;
}
/** Records the response's session token for the request's resource. */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    Map<String, String> responseHeaders = response.getResponseHeaders();
    this.sessionContainer.setSessionToken(request, responseHeaders);
}
/**
 * Populates headers and dispatches a create (POST) to the store proxy,
 * updating the retry context end time when this call is itself a retry.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populated -> {
            RxStoreModel proxy = this.getStoreProxy(populated);
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populated, operationContextAndListenerTuple);
        });
}
/**
 * Dispatches an upsert: a POST with the IS_UPSERT header set. The response's
 * session token is captured so subsequent session-consistent reads observe the write.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populated -> {
            Map<String, String> headers = populated.getHeaders();
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populated)
                .processMessage(populated, operationContextAndListenerTuple)
                .map(response -> {
                    captureSessionToken(populated, response);
                    return response;
                });
        });
}
/**
 * Populates headers and dispatches a replace (PUT) to the store proxy,
 * updating the retry context end time when this call is itself a retry.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel proxy = getStoreProxy(populated);
            return proxy.processMessage(populated);
        });
}
/**
 * Populates headers and dispatches a PATCH to the store proxy,
 * updating the retry context end time when this call is itself a retry.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel proxy = getStoreProxy(populated);
            return proxy.processMessage(populated);
        });
}
/**
 * Creates a document. When no explicit partition key is supplied the request is
 * additionally wrapped with the partition-key-mismatch retry policy, so a stale
 * collection cache entry (e.g. after collection recreate) is retried transparently.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the Create request and dispatches it, mapping the raw response to a
 * typed {@code ResourceResponse<Document>}.
 *
 * @param collectionLink               target collection link
 * @param document                     document payload
 * @param options                      optional request options
 * @param disableAutomaticIdGeneration forwarded to request construction
 * @param requestRetryPolicy           retry policy applied to the dispatch
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
try {
logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Create);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
// Surface synchronous failures through the reactive chain instead of throwing.
logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a document. Like create, a missing explicit partition key adds the
 * partition-key-mismatch retry policy to handle stale collection cache entries.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the Upsert request and dispatches it, mapping the raw response to a
 * typed {@code ResourceResponse<Document>}.
 *
 * @param collectionLink               target collection link
 * @param document                     document payload
 * @param options                      optional request options
 * @param disableAutomaticIdGeneration forwarded to request construction
 * @param retryPolicyInstance          retry policy applied to the dispatch
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
// Surface synchronous failures through the reactive chain instead of throwing.
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces a document addressed by link. A missing explicit partition key adds the
 * partition-key-mismatch retry policy, deriving the collection link from the
 * document link.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates inputs and converts the caller-supplied object into the internal
 * {@link Document} representation before delegating to the typed replace path.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Guard clauses: both arguments are required.
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        final Document typedDocument = documentFromObject(document, mapper);
        return replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Replaces {@code document} in place, addressing it by its self link.
 * <p>
 * When no partition key is supplied, a {@link PartitionKeyMismatchRetryPolicy}
 * wrapper refreshes the stale collection cache and retries on a mismatch.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    // Consistency fix: use the same effectively-final policy for both arguments, matching the
    // upsertDocument pattern. Behavior is unchanged — both locals reference the same object here.
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), finalRequestRetryPolicy);
}
/**
 * Null-checks {@code document} and delegates to the link-based replace path
 * using the document's self link.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fixed log message: this path replaces a document, not a database.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
// Core replace path: serializes the document, resolves the collection, stamps
// partition-key information onto the request, and issues the Replace operation.
// Returns a Mono emitting the replaced document's resource response.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Serialization is timed so it can be surfaced in the request's diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
// Let the retry policy capture per-request state (e.g. region/endpoint) before sending.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
// NOTE(review): the lambda parameter 'req' is unused and the outer 'request' is sent instead;
// presumably addPartitionKeyInformation mutates and returns the same instance — confirm.
return requestObs.flatMap(req -> replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Applies the given patch operations to the document at {@code documentLink},
 * retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, retryPolicy),
        retryPolicy);
}
// Core patch path: serializes the patch operations, records serialization timing in
// the request diagnostics, resolves the collection for partition-key stamping, and
// issues the Patch operation. Returns a Mono emitting the patched document's response.
private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Serialization of the patch payload is timed for diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
// Let the retry policy capture per-request state before sending.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// Content/object are null here: partition key must come from options for a patch.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
// NOTE(review): the lambda parameter 'req' is unused and the outer 'request' is sent;
// presumably addPartitionKeyInformation mutates and returns the same instance — confirm.
return requestObs.flatMap(req -> patch(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Deletes the document at {@code documentLink}, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, null, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the document at {@code documentLink}, using {@code internalObjectNode}
 * as the source for partition-key extraction.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, internalObjectNode, options, retryPolicy),
        retryPolicy);
}
/**
 * Core delete path: builds the Delete request, resolves the collection so
 * partition-key information can be attached, and executes the delete.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        final String path = Utils.joinPath(documentLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Document, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(serviceRequest.requestContext.cosmosDiagnostics), serviceRequest);
        return addPartitionKeyInformation(serviceRequest, null, internalObjectNode, options, collectionObs)
            .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Deletes every document under {@code partitionKey} in the given collection,
 * retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core bulk-delete path: issues a Delete against the PartitionKey resource type,
 * attaching partition-key information resolved from the cached collection.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(serviceRequest.requestContext.cosmosDiagnostics), serviceRequest);
        return addPartitionKeyInformation(serviceRequest, null, null, options, collectionObs)
            .flatMap(req -> this.deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads the document at {@code documentLink}, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core read path: builds the Read request, resolves the collection so
 * partition-key information can be attached, then executes the point read.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        final String path = Utils.joinPath(documentLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Document, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        // Note: the read still targets the outer 'request' (as in the original code);
        // the flatMap only sequences it after partition-key information is attached.
        return addPartitionKeyInformation(request, null, null, options, collectionObs)
            .flatMap(req -> this.read(request, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads all documents in the collection by running an unfiltered query.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, CosmosQueryRequestOptions options, Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // A read-all is modeled as a SELECT * query over the collection.
    final String selectAll = "SELECT * FROM r";
    return queryDocuments(collectionLink, selectAll, options, classOfT);
}
/**
 * Reads a batch of items identified by (id, partition key) pairs in a single logical
 * operation: items are bucketed by the physical partition range that owns their
 * effective partition key, one query is issued per range, and the resulting pages are
 * merged into a single synthetic {@link FeedResponse} with an aggregated request charge.
 *
 * @param itemIdentityList the (id, partition key) pairs to read
 * @param collectionLink   link of the container holding the items
 * @param options          query options applied to the per-range queries
 * @param klass            type to deserialize each item into
 * @return a Mono emitting one merged feed response containing all found items
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    CosmosQueryRequestOptions options,
    Class<T> klass) {
    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                throw new IllegalStateException("Collection cannot be null");
            }
            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);
            return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap =
                    new HashMap<>();
                CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                if (routingMap == null) {
                    throw new IllegalStateException("Failed to get routing map.");
                }
                // Bucket each requested item by the partition range owning its effective key.
                itemIdentityList
                    .forEach(itemIdentity -> {
                        String effectivePartitionKeyString = PartitionKeyInternalHelper
                            .getEffectivePartitionKeyString(
                                BridgeInternal.getPartitionKeyInternal(
                                    itemIdentity.getPartitionKey()),
                                pkDefinition);
                        PartitionKeyRange range =
                            routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                        // computeIfAbsent replaces the original get/null-check/put double lookup.
                        partitionRangeItemKeyMap
                            .computeIfAbsent(range, ignored -> new ArrayList<>())
                            .add(itemIdentity);
                    });
                Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
                rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
                    collection.getPartitionKey());
                // Fan out one query per partition range, then merge pages into one response.
                return createReadManyQuery(
                    resourceLink,
                    new SqlQuerySpec(DUMMY_SQL_QUERY),
                    options,
                    Document.class,
                    ResourceType.Document,
                    collection,
                    Collections.unmodifiableMap(rangeQueryMap))
                    .collectList()
                    .map(feedList -> {
                        List<T> finalList = new ArrayList<>();
                        HashMap<String, String> headers = new HashMap<>();
                        ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                        double requestCharge = 0;
                        for (FeedResponse<Document> page : feedList) {
                            ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                ModelBridgeInternal.queryMetrics(page);
                            if (pageQueryMetrics != null) {
                                pageQueryMetrics.forEach(
                                    aggregatedQueryMetrics::putIfAbsent);
                            }
                            requestCharge += page.getRequestCharge();
                            finalList.addAll(page.getResults().stream().map(document ->
                                ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                        }
                        // Surface the summed RU charge of all per-range queries.
                        headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                            .toString(requestCharge));
                        return BridgeInternal.createFeedResponse(finalList, headers);
                    });
            });
        }
    );
}
/**
 * Builds, for each partition range, the SQL spec that fetches exactly that range's items.
 * A simpler IN-list query is used when the partition key path is the item id itself.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    final boolean pkIsId = "[\"id\"]".equals(partitionKeySelector);
    final Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, identities) -> {
        final SqlQuerySpec spec = pkIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(identities, partitionKeySelector)
            : createReadManyQuerySpec(identities, partitionKeySelector);
        rangeQueryMap.put(range, spec);
    });
    return rangeQueryMap;
}
/**
 * Builds an {@code id IN (...)} query for the case where the partition key path is the
 * item id; items whose id differs from their partition key value are skipped.
 * <p>
 * Bug fix: the original appended the separator based on the loop index, so a skipped
 * trailing item (via {@code continue}) left a dangling "{@code , }" before the closing
 * parenthesis, producing invalid SQL. Parameter names are now collected first and
 * joined, which cannot produce a dangling separator.
 *
 * @param idPartitionKeyPairList the (id, partition key) pairs for one partition range
 * @param partitionKeySelector   unused here; kept for signature parity with
 *                               {@link #createReadManyQuerySpec}
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    List<String> parameterNames = new ArrayList<>();
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);
        String idValue = itemIdentity.getId();
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        if (!Objects.equals(idValue, pkValue)) {
            // Only items whose id equals their partition key belong in this query shape.
            continue;
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        parameterNames.add(idParamName);
    }
    String queryText = "SELECT * FROM c WHERE c.id IN ( "
        + String.join(", ", parameterNames)
        + " )";
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Builds an OR-of-conjunctions query matching each (id, partition key) pair:
 * {@code (c.id = @p1 AND c[pk] = @p0) OR ...}. Two parameters are bound per item.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder sql = new StringBuilder("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < itemIdentities.size(); i++) {
        final CosmosItemIdentity itemIdentity = itemIdentities.get(i);
        final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(itemIdentity.getPartitionKey());
        final String pkParamName = "@param" + (2 * i);
        final String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName, pkValue));
        parameters.add(new SqlParameter(idParamName, itemIdentity.getId()));
        if (i > 0) {
            sql.append(" OR ");
        }
        // Note: spacing ("AND  c", trailing " )") reproduces the original output exactly.
        sql.append("(")
           .append("c.id = ").append(idParamName)
           .append(" AND ").append(" c").append(partitionKeySelector)
           .append(" = ").append(pkParamName)
           .append(" )");
    }
    sql.append(" )");
    return new SqlQuerySpec(sql.toString(), parameters);
}
// Builds the bracketed JSON-path selector (e.g. ["a"]["b"]) from the collection's
// partition key paths, used when composing read-many query text.
// NOTE(review): replacing '"' with '\' (rather than '\"') looks suspicious for path
// parts containing quotes — confirm this matches the intended escaping.
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getPaths()
.stream()
// Drop the leading '/' of each path segment.
.map(pathPart -> StringUtils.substring(pathPart, 1))
// Escape embedded double quotes (see review note above).
.map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
// Wrap each segment as ["segment"] and concatenate all segments.
.map(part -> "[\"" + part + "\"]")
.collect(Collectors.joining());
}
/**
 * Creates and runs the read-many execution context: one query per partition range
 * (from {@code rangeQueryMap}), all correlated under a single activity id.
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    final UUID activityId = Utils.randomUUID();
    final IDocumentQueryClient queryClient =
        documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    final Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum);
    return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Queries documents using raw query text; wraps it in a {@link SqlQuerySpec}
 * and delegates to the spec-based overload.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, CosmosQueryRequestOptions options, Class<T> classOfT) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, spec, options, classOfT);
}
// Adapts this client into the IDocumentQueryClient interface consumed by the query
// pipeline. Most accessors delegate to the enclosing RxDocumentClientImpl; query
// execution optionally notifies an operation listener around each request.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
// Executes one query request; when a listener tuple is present, the request is
// tagged with the correlated activity id and request/response/error callbacks fire.
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
// Read-feed is not supported by this adapter; callers must not rely on it.
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
return null;
}
};
}
/**
 * Queries documents with a parameterized {@link SqlQuerySpec}; the query text is
 * logged before the query pipeline is created.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options,
    Class<T> classOfT) {
    final SqlQuerySpecLogger queryLogger = SqlQuerySpecLogger.getInstance();
    queryLogger.logQuery(querySpec);
    return createQuery(collectionLink, querySpec, options, classOfT, ResourceType.Document);
}
/**
 * Runs a change-feed query against the given collection using both its alt link
 * and resource id for addressing.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    return new ChangeFeedQueryImpl<T>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions)
        .executeAsync();
}
// Reads all documents belonging to one logical partition. Resolves the collection,
// derives the partition-key selector, maps the key to its owning physical partition
// range via the routing map, and runs a logical-partition scan query pinned to that
// range. Wrapped in an InvalidPartitionExceptionRetryPolicy so a stale collection
// cache (collection recreated) is refreshed and the operation retried.
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
CosmosQueryRequestOptions options,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Scan query filtered to the single logical partition identified by partitionKey.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
// Work on a copy so the caller's options object is not mutated below.
final CosmosQueryRequestOptions effectiveOptions =
ModelBridgeInternal.createQueryRequestOptions(options);
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
// Map the partition key to the physical range that owns it, then pin
// the query to that range via the options.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId);
});
},
invalidPartitionExceptionRetryPolicy);
});
}
// Exposes the client's query-plan cache. The live map instance is returned
// (no defensive copy), so callers share state with the query pipeline.
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
return queryPlanCache;
}
/**
 * Reads the partition key range feed of the given collection.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Validates inputs and builds the service request for a stored-procedure
 * operation of the given type under {@code collectionLink}.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
/**
 * Validates inputs and builds the service request for a user-defined-function
 * operation of the given type under {@code collectionLink}.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, path, udf, headers, options);
}
/**
 * Creates a stored procedure in the given collection, retrying per the
 * session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core create path for a stored procedure: builds the Create request, notifies the
 * retry policy, executes, and maps the response.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        final RxDocumentServiceRequest request =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a stored procedure in the given collection, retrying per the
 * session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core upsert path for a stored procedure: builds the Upsert request, notifies the
 * retry policy, executes, and maps the response.
 */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        final RxDocumentServiceRequest request =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a stored procedure (addressed by its self link), retrying per the
 * session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core replace path for a stored procedure: validates the resource, builds the
 * Replace request against the self link, executes, and maps the response.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        final String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes the stored procedure at {@code storedProcedureLink}, retrying per the
 * session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class,
Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
CosmosQueryRequestOptions options) {
return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
List<Object> procedureParams) {
return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy);
}
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy);
}
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
DocumentClientRetryPolicy requestRetryPolicy,
boolean disableAutomaticIdGeneration) {
try {
logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
} catch (Exception ex) {
logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
return Mono.error(ex);
}
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
trigger, requestHeaders, options);
return request;
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.Trigger, Trigger.class,
Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
CosmosQueryRequestOptions options) {
return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
String query, CosmosQueryRequestOptions options) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.Conflict, Conflict.class,
Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
CosmosQueryRequestOptions options) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
    // Creates a user resource under the given database.
    // NOTE(review): unlike the sibling *Internal methods (e.g. upsertUserInternal), this method never
    // calls documentClientRetryPolicy.onBeforeSendRequest(request) before sending — confirm intentional.
    private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
            // getUserRequest validates databaseLink/user and builds the service request.
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            // Surface validation/construction failures through the reactive pipeline.
            logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (user == null) {
throw new IllegalArgumentException("user");
}
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.User, path, user, requestHeaders, options);
return request;
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Core of readUser: validates the link, issues a Read request, and maps the response
// to a typed ResourceResponse<User>. Synchronous failures become an error Mono.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
    // Users are read as a paged feed over the database's /users path.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String usersPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.User, User.class, usersPath);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, spec, options);
}
// All user queries funnel into the generic createQuery pipeline.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
// Public entry for reading a single client encryption key; shares one retry-policy
// instance between the internal call and the retry wrapper.
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Core of readClientEncryptionKey: validates the link, issues a Read request, and maps
// the response to ResourceResponse<ClientEncryptionKey>.
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
throw new IllegalArgumentException("clientEncryptionKeyLink");
}
logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
String path = Utils.joinPath(clientEncryptionKeyLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry for creating a client encryption key under a database.
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}
// Core of createClientEncryptionKey: builds the Create request (validation happens in
// getClientEncryptionKeyRequest) and maps the response to a typed resource response.
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request for a client-encryption-key operation under the given database.
 *
 * @param databaseLink link of the owning database; must be non-empty.
 * @param clientEncryptionKey the key resource to send; must be non-null and pass validation.
 * @param options request options used to populate headers (may be null).
 * @param operationType the operation the request is for (e.g. Create).
 * @return the populated request targeting the database's client-encryption-key path.
 * @throws IllegalArgumentException if databaseLink is empty or clientEncryptionKey is null.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    // Return directly instead of routing through a redundant local variable.
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
}
// Public entry for replacing a client encryption key at a name-based link.
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
String nameBasedLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey,
nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Core of replaceClientEncryptionKey: validates the resource, issues a Replace request
// at the name-based link, and maps the response to a typed resource response.
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(nameBasedLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) {
    // Client encryption keys are read as a paged feed under the database.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String keysPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, keysPath);
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query,
CosmosQueryRequestOptions options) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryClientEncryptionKeys(databaseLink, spec, options);
}
// Client-encryption-key queries funnel into the generic createQuery pipeline.
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a permission under the given user.
 *
 * Bug fix: the original code called {@code getRequestPolicy()} twice, so the retry
 * wrapper used a different policy instance than the one passed to the internal call —
 * on retry, the wrapper's policy had never observed the request. All sibling methods
 * (upsert/replace/delete permission) share a single instance; do the same here.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
// Core of createPermission: builds the Create request (validation happens in
// getPermissionRequest) and maps the response to ResourceResponse<Permission>.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
RequestOptions options) {
    // Upsert a permission under the user, sharing one retry-policy instance.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
// Core of upsertPermission: builds the Upsert request, primes the retry policy with it,
// and maps the response to ResourceResponse<Permission>.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request for a permission operation under the given user.
 *
 * @param userLink link of the owning user; must be non-empty.
 * @param permission the permission resource to send; must be non-null and pass validation.
 * @param options request options used to populate headers (may be null).
 * @param operationType the operation the request is for (Create/Upsert/...).
 * @return the populated request targeting the user's /permissions path.
 * @throws IllegalArgumentException if userLink is empty or permission is null.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
    // Return directly instead of routing through a redundant local variable.
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, path, permission, requestHeaders, options);
}
// Public entry for replacing a permission in place (targets the permission's self link).
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
// Core of replacePermission: validates the resource, issues a Replace request at the
// permission's self link, and maps the response to ResourceResponse<Permission>.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Delete the permission at permissionLink, sharing one retry-policy instance.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Core of deletePermission: validates the link, issues a Delete request, and maps the
// response to ResourceResponse<Permission>.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry for reading a single permission.
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Core of readPermission: validates the link, issues a Read request, and maps the
// response to ResourceResponse<Permission>.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) {
    // Permissions are read as a paged feed under the user's /permissions path.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    String permissionsPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Permission, Permission.class, permissionsPath);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
CosmosQueryRequestOptions options) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryPermissions(userLink, spec, options);
}
// Permission queries funnel into the generic createQuery pipeline.
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
// Public entry for replacing an offer (throughput resource).
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Core of replaceOffer: validates the resource and issues a Replace at the offer's
// self link. Offers take no per-request headers/options (both null below).
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // Read a single offer, sharing one retry-policy instance with the wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
// Core of readOffer: validates the link and issues a Read request. The null headers
// are cast to disambiguate the RxDocumentServiceRequest.create overload.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
    // Offers live under a fixed top-level path rather than under a database.
    String offersPath = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return readFeed(options, ResourceType.Offer, Offer.class, offersPath);
}
// Generic feed-reading pipeline shared by readUsers/readPermissions/readOffers/etc.
// Pages through resourceLink via continuation tokens: createRequestFunc builds each
// page's ReadFeed request (carrying the continuation header and page size), and
// executeFunc runs it with retry, mapping the raw response into a typed FeedResponse.
private <T> Flux<FeedResponse<T>> readFeed(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink) {
if (options == null) {
options = new CosmosQueryRequestOptions();
}
// -1 means "let the service choose the page size".
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
retryPolicy.onBeforeSendRequest(request);
return request;
};
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(
response,
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getItemFactoryMethod(finalCosmosQueryRequestOptions, klass),
klass)),
retryPolicy);
return Paginator.getPaginatedQueryResultAsObservable(
options, createRequestFunc, executeFunc, maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryOffers(spec, options);
}
// Offer queries have no parent link (null) and funnel into the generic pipeline.
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
// Fetches the database account from the service (not from cache; see
// getLatestDatabaseAccount for the cached variant).
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
documentClientRetryPolicy);
}
// Returns the endpoint manager's most recently observed database account snapshot.
@Override
public DatabaseAccount getLatestDatabaseAccount() {
return this.globalEndpointManager.getLatestDatabaseAccount();
}
// Core of getDatabaseAccount: reads the account resource at the service root ("").
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
// The cast disambiguates the RxDocumentServiceRequest.create overload.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Exposes the session container; typed as Object at this API boundary.
public Object getSession() {
return this.sessionContainer;
}
// Replaces the session container; the argument must be a SessionContainer or the cast throws.
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
// Accessor for the client-side collection metadata cache.
@Override
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
// Accessor for the partition-key-range metadata cache.
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
// Reads the database account from a specific endpoint (used per-region), deferring
// request creation to subscription time. Also refreshes the multi-write flag from the
// returned account, combined with the client's connection policy.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount ->
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the request being dispatched
 * @return the gateway proxy or the direct store model, chosen from the request's
 *         resource type and operation type
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// Explicit per-request override always wins.
if (request.UseGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Resource types that are gateway-only regardless of operation.
if (resourceType == ResourceType.Offer ||
resourceType == ResourceType.ClientEncryptionKey ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange ||
resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
return this.gatewayProxy;
}
// Metadata-style mutations go through gateway; document-level work goes direct.
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Cross-partition queries/feeds (no PK range identity and no partition key header)
// need gateway-side routing; everything else can go direct.
if ((operationType == OperationType.Query ||
operationType == OperationType.SqlQuery ||
operationType == OperationType.ReadFeed) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null &&
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
// Idempotent shutdown: the closed flag guards against double-close, and each owned
// resource is released quietly so one failure does not abort the rest.
@Override
public void close() {
logger.info("Attempting to close client {}", this.clientId);
if (!closed.getAndSet(true)) {
activeClientsCnt.decrementAndGet();
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
LifeCycleUtils.closeQuietly(this.reactorHttpClient);
logger.info("Shutting down CpuMonitor ...");
CpuMemoryMonitor.unregister(this);
// Only built lazily when throughput control was enabled.
if (this.throughputControlEnabled.get()) {
logger.info("Closing ThroughputControlStore ...");
this.throughputControlStore.close();
}
logger.info("Shutting down completed.");
} else {
logger.warn("Already shutdown!");
}
}
// Accessor for the item deserializer used for document payloads.
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
// Lazily builds the throughput control store on first use (compareAndSet makes the
// one-time initialization race-free under the method's synchronization), then
// registers the group with it.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) {
checkNotNull(group, "Throughput control group can not be null");
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
this.storeModel.enableThroughputControl(throughputControlStore);
}
this.throughputControlStore.enableThroughputControlGroup(group);
}
// Warms up connections and metadata caches for a container; the store model does the work.
@Override
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(String containerLink) {
checkArgument(StringUtils.isNotEmpty(containerLink), "Argument 'containerLink' should not be null nor empty");
return this.storeModel.openConnectionsAndInitCaches(containerLink);
}
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
PartitionKey partitionKey,
String partitionKeySelector) {
    // Builds "SELECT * FROM c WHERE c<selector> = @pkValue", passing the partition key
    // value as a bound parameter rather than inlining it into the query text.
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    String pkParamName = "@pkValue";
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
// Resolves the container's current feed ranges. The InvalidPartitionException retry
// policy lets a stale-name-cache failure (thrown inside toFeedRanges) trigger a retry
// after the cache is refreshed.
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink),
invalidPartitionExceptionRetryPolicy);
}
// Core of getFeedRanges: resolves the collection, then fetches all overlapping
// partition key ranges (full range) and converts each to a FeedRange.
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
private static List<FeedRange> toFeedRanges(
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    // A null range list means the name cache is stale: force a refresh and let the
    // InvalidPartitionException retry policy re-drive the call.
    final List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    final List<FeedRange> feedRanges = new ArrayList<>(ranges.size());
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
// Exposes a partition key range as an EPK-based feed range.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
} |
should it be GsonJsonReader here (and in other places in this file) ? | public JsonReader reset() {
if (!resetSupported) {
throw new IllegalStateException("'reset' isn't supported by this JsonReader.");
}
if (jsonBytes != null) {
return DefaultJsonReader.fromBytes(jsonBytes);
} else {
return DefaultJsonReader.fromString(jsonString);
}
} | return DefaultJsonReader.fromBytes(jsonBytes); | public JsonReader reset() {
if (!resetSupported) {
throw new IllegalStateException("'reset' isn't supported by this JsonReader.");
}
if (jsonBytes != null) {
return DefaultJsonReader.fromBytes(jsonBytes);
} else {
return DefaultJsonReader.fromString(jsonString);
}
} | class GsonJsonReader extends JsonReader {
// Underlying Gson streaming reader that this adapter wraps.
private final com.google.gson.stream.JsonReader reader;
// Original payload, retained only when constructed from byte[]/String so the reader
// can be re-created from it (resetSupported is true in those constructors).
private final byte[] jsonBytes;
private final String jsonString;
private final boolean resetSupported;
// Last token peeked from Gson and its mapped equivalent in this API's token model.
private com.google.gson.stream.JsonToken gsonCurrentToken;
private JsonToken currentToken;
/**
 * Constructs an instance of {@link GsonJsonReader} from a {@code byte[]}.
 *
 * @param json JSON {@code byte[]}; decoded as UTF-8.
 * @return An instance of {@link GsonJsonReader} that supports reset (the bytes are retained).
 */
public static JsonReader fromBytes(byte[] json) {
return new GsonJsonReader(new InputStreamReader(new ByteArrayInputStream(json), StandardCharsets.UTF_8),
true, json, null);
}
/**
 * Constructs an instance of {@link GsonJsonReader} from a String.
 *
 * @param json JSON String.
 * @return An instance of {@link GsonJsonReader} that supports reset (the string is retained).
 */
public static JsonReader fromString(String json) {
return new GsonJsonReader(new StringReader(json), true, null, json);
}
/**
 * Constructs an instance of {@link GsonJsonReader} from an {@link InputStream}.
 *
 * @param json JSON {@link InputStream}; decoded as UTF-8.
 * @return An instance of {@link GsonJsonReader}.
 */
public static JsonReader fromStream(InputStream json) {
    // No buffered copy is kept, so resetSupported is false for stream-backed readers.
    Reader streamReader = new InputStreamReader(json, StandardCharsets.UTF_8);
    return new GsonJsonReader(streamReader, false, null, null);
}
// Private constructor: wraps the character source in a Gson streaming reader and
// records the reset-replay material (bytes or string) when available.
private GsonJsonReader(Reader reader, boolean resetSupported, byte[] jsonBytes, String jsonString) {
this.reader = new com.google.gson.stream.JsonReader(reader);
this.resetSupported = resetSupported;
this.jsonBytes = jsonBytes;
this.jsonString = jsonString;
}
// Returns the token most recently produced by nextToken (null before the first call).
@Override
public JsonToken currentToken() {
return currentToken;
}
// Advances to the next token. Gson's peek() does not consume structural tokens, so the
// previously peeked token must first be consumed explicitly (beginObject/endObject/
// beginArray/endArray/nextNull) before peeking again — the order of these branches
// mirrors Gson's streaming contract and must not change.
@Override
public JsonToken nextToken() {
if (gsonCurrentToken == com.google.gson.stream.JsonToken.BEGIN_OBJECT) {
invokeWithWrappedIoException(reader::beginObject);
} else if (gsonCurrentToken == com.google.gson.stream.JsonToken.END_OBJECT) {
invokeWithWrappedIoException(reader::endObject);
} else if (gsonCurrentToken == com.google.gson.stream.JsonToken.BEGIN_ARRAY) {
invokeWithWrappedIoException(reader::beginArray);
} else if (gsonCurrentToken == com.google.gson.stream.JsonToken.END_ARRAY) {
invokeWithWrappedIoException(reader::endArray);
} else if (currentToken == JsonToken.NULL) {
invokeWithWrappedIoException(reader::nextNull);
}
gsonCurrentToken = callWithWrappedIoException(reader::peek);
currentToken = mapToken(gsonCurrentToken);
return currentToken;
}
@Override
public byte[] getBinaryValue() {
    // A JSON null becomes a Java null; otherwise the string value is Base64-decoded.
    if (currentToken != JsonToken.NULL) {
        return Base64.getDecoder().decode(callWithWrappedIoException(reader::nextString));
    }
    invokeWithWrappedIoException(reader::nextNull);
    return null;
}
@Override
public boolean getBooleanValue() {
    // Consumes the current boolean token; IOExceptions surface via the wrapping helper.
    return callWithWrappedIoException(reader::nextBoolean);
}
@Override
public double getDoubleValue() {
    // Consumes the current numeric token as a double.
    return callWithWrappedIoException(reader::nextDouble);
}
@Override
public float getFloatValue() {
    // GSON has no float accessor: read as (boxed) double and narrow to float.
    return callWithWrappedIoException(reader::nextDouble).floatValue();
}
@Override
public int getIntValue() {
    // Consumes the current numeric token as an int.
    return callWithWrappedIoException(reader::nextInt);
}
@Override
public long getLongValue() {
    // Consumes the current numeric token as a long.
    return callWithWrappedIoException(reader::nextLong);
}
@Override
public String getStringValue() {
    // A JSON null is surfaced as Java null rather than the literal text "null".
    // Note: unlike getBinaryValue(), the null token is intentionally not consumed here.
    return currentToken == JsonToken.NULL ? null : callWithWrappedIoException(reader::nextString);
}
@Override
public String getFieldName() {
    // Consumes and returns the current property name token.
    return callWithWrappedIoException(reader::nextName);
}
@Override
public void skipChildren() {
    // GSON's skipValue() skips the current value, including whole nested containers.
    invokeWithWrappedIoException(reader::skipValue);
}
/**
 * Buffers the current JSON object into a new standalone JsonReader.
 * Valid starting positions are the beginning of an object/array or a field name
 * inside an object; any other position throws IllegalStateException.
 */
@Override
public JsonReader bufferObject() {
    StringBuilder bufferedObject = new StringBuilder();
    if (isStartArrayOrObject()) {
        // Positioned on '{' or '[': copy the entire container verbatim.
        readChildren(bufferedObject);
    } else if (currentToken() == JsonToken.FIELD_NAME) {
        // Positioned mid-object on a field name: rebuild the object text from here
        // until the matching END_OBJECT.
        bufferedObject.append("{");
        JsonToken token = currentToken();
        boolean needsComa = false; // whether a ',' must precede the next field (sic: "Coma")
        while (token != JsonToken.END_OBJECT) {
            if (needsComa) {
                bufferedObject.append(",");
            }
            if (token == JsonToken.FIELD_NAME) {
                bufferedObject.append("\"").append(getFieldName()).append("\":");
                needsComa = false;
            } else {
                if (token == JsonToken.STRING) {
                    // String values are re-quoted.
                    bufferedObject.append("\"").append(getStringValue()).append("\"");
                } else if (isStartArrayOrObject()) {
                    // Nested containers are copied wholesale.
                    readChildren(bufferedObject);
                } else {
                    // Numbers, booleans and nulls keep their literal text.
                    bufferedObject.append(getTextValue());
                }
                needsComa = true;
            }
            token = nextToken();
        }
        bufferedObject.append("}");
    } else {
        throw new IllegalStateException("Cannot buffer a JSON object from a non-object, non-field name "
            + "starting location. Starting location: " + currentToken());
    }
    return DefaultJsonReader.fromString(bufferedObject.toString());
}
@Override
public boolean resetSupported() {
    // True only for byte[]/String-backed readers (see the static factories).
    return resetSupported;
}
// Fix: the annotation was duplicated ('@Override' twice), which is a compile error
// because @Override is not a repeatable annotation.
@Override
public void close() throws IOException {
    // Closes the underlying GSON streaming reader and its wrapped source.
    reader.close();
}
/*
* Maps the GSON JsonToken to the azure-json JsonToken.
*/
private static JsonToken mapToken(com.google.gson.stream.JsonToken token) {
    // Null means no token has been peeked yet.
    if (token == null) {
        return null;
    }
    switch (token) {
        case BEGIN_OBJECT:
            return JsonToken.START_OBJECT;
        case END_OBJECT:
        case END_DOCUMENT:
            // GSON reports end-of-stream as END_DOCUMENT; azure-json folds it into END_OBJECT.
            return JsonToken.END_OBJECT;
        case BEGIN_ARRAY:
            return JsonToken.START_ARRAY;
        case END_ARRAY:
            return JsonToken.END_ARRAY;
        case NAME:
            return JsonToken.FIELD_NAME;
        case STRING:
            return JsonToken.STRING;
        case NUMBER:
            return JsonToken.NUMBER;
        case BOOLEAN:
            return JsonToken.BOOLEAN;
        case NULL:
            return JsonToken.NULL;
        default:
            // Defensive: no other GSON token kinds exist today.
            throw new IllegalStateException("Unsupported token type: '" + token + "'.");
    }
}
} | class GsonJsonReader extends JsonReader {
private final com.google.gson.stream.JsonReader reader;
private final byte[] jsonBytes;
private final String jsonString;
private final boolean resetSupported;
private com.google.gson.stream.JsonToken gsonCurrentToken;
private JsonToken currentToken;
/**
* Constructs an instance of {@link GsonJsonReader} from a {@code byte[]}.
*
* @param json JSON {@code byte[]}.
* @return An instance of {@link GsonJsonReader}.
*/
public static JsonReader fromBytes(byte[] json) {
return new GsonJsonReader(new InputStreamReader(new ByteArrayInputStream(json), StandardCharsets.UTF_8),
true, json, null);
}
/**
* Constructs an instance of {@link GsonJsonReader} from a String.
*
* @param json JSON String.
* @return An instance of {@link GsonJsonReader}.
*/
public static JsonReader fromString(String json) {
return new GsonJsonReader(new StringReader(json), true, null, json);
}
/**
* Constructs an instance of {@link GsonJsonReader} from an {@link InputStream}.
*
* @param json JSON {@link InputStream}.
* @return An instance of {@link GsonJsonReader}.
*/
public static JsonReader fromStream(InputStream json) {
return new GsonJsonReader(new InputStreamReader(json, StandardCharsets.UTF_8), false, null, null);
}
private GsonJsonReader(Reader reader, boolean resetSupported, byte[] jsonBytes, String jsonString) {
this.reader = new com.google.gson.stream.JsonReader(reader);
this.resetSupported = resetSupported;
this.jsonBytes = jsonBytes;
this.jsonString = jsonString;
}
@Override
public JsonToken currentToken() {
return currentToken;
}
@Override
public JsonToken nextToken() {
try {
if (gsonCurrentToken == com.google.gson.stream.JsonToken.BEGIN_OBJECT) {
reader.beginObject();
} else if (gsonCurrentToken == com.google.gson.stream.JsonToken.END_OBJECT) {
reader.endObject();
} else if (gsonCurrentToken == com.google.gson.stream.JsonToken.BEGIN_ARRAY) {
reader.beginArray();
} else if (gsonCurrentToken == com.google.gson.stream.JsonToken.END_ARRAY) {
reader.endArray();
} else if (currentToken == JsonToken.NULL) {
reader.nextNull();
}
gsonCurrentToken = reader.peek();
currentToken = mapToken(gsonCurrentToken);
return currentToken;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public byte[] getBinaryValue() {
try {
if (currentToken == JsonToken.NULL) {
reader.nextNull();
return null;
} else {
return Base64.getDecoder().decode(reader.nextString());
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public boolean getBooleanValue() {
try {
return reader.nextBoolean();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public double getDoubleValue() {
try {
return reader.nextDouble();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public float getFloatValue() {
try {
return (float) reader.nextDouble();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public int getIntValue() {
try {
return reader.nextInt();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public long getLongValue() {
try {
return reader.nextLong();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public String getStringValue() {
try {
if (currentToken == JsonToken.NULL) {
return null;
} else {
return reader.nextString();
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public String getFieldName() {
try {
return reader.nextName();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public void skipChildren() {
try {
reader.skipValue();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public JsonReader bufferObject() {
StringBuilder bufferedObject = new StringBuilder();
if (isStartArrayOrObject()) {
readChildren(bufferedObject);
} else if (currentToken() == JsonToken.FIELD_NAME) {
bufferedObject.append("{");
JsonToken token = currentToken();
boolean needsComa = false;
while (token != JsonToken.END_OBJECT) {
if (needsComa) {
bufferedObject.append(",");
}
if (token == JsonToken.FIELD_NAME) {
bufferedObject.append("\"").append(getFieldName()).append("\":");
needsComa = false;
} else {
if (token == JsonToken.STRING) {
bufferedObject.append("\"").append(getStringValue()).append("\"");
} else if (isStartArrayOrObject()) {
readChildren(bufferedObject);
} else {
bufferedObject.append(getTextValue());
}
needsComa = true;
}
token = nextToken();
}
bufferedObject.append("}");
} else {
throw new IllegalStateException("Cannot buffer a JSON object from a non-object, non-field name "
+ "starting location. Starting location: " + currentToken());
}
return DefaultJsonReader.fromString(bufferedObject.toString());
}
@Override
public boolean resetSupported() {
return resetSupported;
}
// Fix: the annotation was duplicated ('@Override' twice), which is a compile error
// because @Override is not a repeatable annotation.
@Override
public void close() throws IOException {
    // Closes the underlying GSON streaming reader and its wrapped source.
    reader.close();
}
/*
* Maps the GSON JsonToken to the azure-json JsonToken.
*/
private static JsonToken mapToken(com.google.gson.stream.JsonToken token) {
if (token == null) {
return null;
}
switch (token) {
case BEGIN_OBJECT:
return JsonToken.START_OBJECT;
case END_OBJECT:
case END_DOCUMENT:
return JsonToken.END_OBJECT;
case BEGIN_ARRAY:
return JsonToken.START_ARRAY;
case END_ARRAY:
return JsonToken.END_ARRAY;
case NAME:
return JsonToken.FIELD_NAME;
case STRING:
return JsonToken.STRING;
case NUMBER:
return JsonToken.NUMBER;
case BOOLEAN:
return JsonToken.BOOLEAN;
case NULL:
return JsonToken.NULL;
default:
throw new IllegalStateException("Unsupported token type: '" + token + "'.");
}
}
} |
Does `map` catch this throw and convert it into a `Mono.error`? | public static Mono<Void> writeToWritableByteChannel(Flux<ByteBuffer> content, WritableByteChannel channel) {
if (content == null && channel == null) {
return monoError(LOGGER, new NullPointerException("'content' and 'channel' cannot be null."));
} else if (content == null) {
return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
} else if (channel == null) {
return monoError(LOGGER, new NullPointerException("'channel' cannot be null."));
}
return content.publishOn(Schedulers.boundedElastic())
.map(buffer -> {
while (buffer.hasRemaining()) {
try {
channel.write(buffer);
} catch (IOException e) {
throw Exceptions.propagate(e);
}
}
return buffer;
}).then();
} | throw Exceptions.propagate(e); | public static Mono<Void> writeToWritableByteChannel(Flux<ByteBuffer> content, WritableByteChannel channel) {
if (content == null && channel == null) {
return monoError(LOGGER, new NullPointerException("'content' and 'channel' cannot be null."));
} else if (content == null) {
return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
} else if (channel == null) {
return monoError(LOGGER, new NullPointerException("'channel' cannot be null."));
}
return content.publishOn(Schedulers.boundedElastic())
.map(buffer -> {
while (buffer.hasRemaining()) {
try {
channel.write(buffer);
} catch (IOException e) {
throw Exceptions.propagate(e);
}
}
return buffer;
}).then();
} | class FluxUtil {
private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
private static final ClientLogger LOGGER = new ClientLogger(FluxUtil.class);
/**
* Checks if a type is Flux<ByteBuffer>.
*
* @param entityType the type to check
* @return whether the type represents a Flux that emits ByteBuffer
*/
public static boolean isFluxByteBuffer(Type entityType) {
    // The type must be Flux (or a subtype) whose first type argument is ByteBuffer (or a subtype).
    if (TypeUtil.isTypeOrSubTypeOf(entityType, Flux.class)) {
        final Type innerType = TypeUtil.getTypeArguments(entityType)[0];
        return TypeUtil.isTypeOrSubTypeOf(innerType, ByteBuffer.class);
    }
    return false;
}
/**
* Collects ByteBuffers emitted by a Flux into a byte array.
*
* @param stream A stream which emits ByteBuffer instances.
* @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux.
* @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than
* {@link Integer#MAX_VALUE}.
*/
public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream) {
    // Accumulate every emitted buffer into a growable collector, then snapshot to byte[].
    return stream.collect(ByteBufferCollector::new, ByteBufferCollector::write)
        .map(ByteBufferCollector::toByteArray);
}
/**
* Collects ByteBuffers emitted by a Flux into a byte array.
* <p>
* Unlike {@link
* This size hint allows for optimizations when creating the initial buffer to reduce the number of times it needs
* to be resized while concatenating emitted ByteBuffers.
*
* @param stream A stream which emits ByteBuffer instances.
* @param sizeHint A hint about the expected stream size.
* @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux.
* @throws IllegalArgumentException If {@code sizeHint} is equal to or less than {@code 0}.
* @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link
* Integer
*/
public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream, int sizeHint) {
    // Same as the un-hinted overload, but presizes the collector to reduce resizes.
    return stream.collect(() -> new ByteBufferCollector(sizeHint), ByteBufferCollector::write)
        .map(ByteBufferCollector::toByteArray);
}
/**
* Collects ByteBuffers returned in a network response into a byte array.
* <p>
* The {@code headers} are inspected for containing an {@code Content-Length} which determines if a size hinted
* collection, {@link
*
*
* @param stream A network response ByteBuffer stream.
* @param headers The HTTP headers of the response.
* @return A Mono which emits the collected network response ByteBuffers.
* @throws NullPointerException If {@code headers} is null.
* @throws IllegalStateException If the size of the network response is greater than {@link Integer#MAX_VALUE}.
*/
public static Mono<byte[]> collectBytesFromNetworkResponse(Flux<ByteBuffer> stream, HttpHeaders headers) {
    Objects.requireNonNull(headers, "'headers' cannot be null.");
    String contentLengthHeader = headers.getValue("Content-Length");
    if (contentLengthHeader != null) {
        try {
            int contentLength = Integer.parseInt(contentLengthHeader);
            // A positive Content-Length becomes a size hint; zero/negative means no body.
            return contentLength > 0
                ? FluxUtil.collectBytesInByteBufferStream(stream, contentLength)
                : Mono.just(EMPTY_BYTE_ARRAY);
        } catch (NumberFormatException ignored) {
            // Unparseable Content-Length: fall through to un-hinted collection.
        }
    }
    // No usable Content-Length header: collect without a size hint.
    return FluxUtil.collectBytesInByteBufferStream(stream);
}
/**
* Gets the content of the provided ByteBuffer as a byte array. This method will create a new byte array even if the
* ByteBuffer can have optionally backing array.
*
* @param byteBuffer the byte buffer
* @return the byte array
*/
/**
 * Copies the readable content of the provided {@link ByteBuffer} into a fresh byte array,
 * even when the buffer has an optional backing array.
 *
 * @param byteBuffer the byte buffer to copy from
 * @return a new byte array containing the buffer's remaining bytes
 */
public static byte[] byteBufferToArray(ByteBuffer byteBuffer) {
    // remaining() measures from the buffer's current position; get() advances it.
    byte[] copied = new byte[byteBuffer.remaining()];
    byteBuffer.get(copied);
    return copied;
}
/**
* Creates a {@link Flux} that is capable of resuming a download by applying retry logic when an error occurs.
*
* @param downloadSupplier Supplier of the initial download.
* @param onDownloadErrorResume {@link BiFunction} of {@link Throwable} and {@link Long} which is used to resume
* downloading when an error occurs.
* @param maxRetries The maximum number of times a download can be resumed when an error occurs.
* @return A {@link Flux} that downloads reliably.
*/
public static Flux<ByteBuffer> createRetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier,
BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries) {
return createRetriableDownloadFlux(downloadSupplier, onDownloadErrorResume, maxRetries, 0L);
}
/**
* Creates a {@link Flux} that is capable of resuming a download by applying retry logic when an error occurs.
*
* @param downloadSupplier Supplier of the initial download.
* @param onDownloadErrorResume {@link BiFunction} of {@link Throwable} and {@link Long} which is used to resume
* downloading when an error occurs.
* @param maxRetries The maximum number of times a download can be resumed when an error occurs.
* @param position The initial offset for the download.
* @return A {@link Flux} that downloads reliably.
*/
public static Flux<ByteBuffer> createRetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier,
BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries, long position) {
return new RetriableDownloadFlux(downloadSupplier, onDownloadErrorResume, maxRetries, position);
}
/**
* Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer} using a chunk size of 4096.
* <p>
* Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered
* non-replayable as well.
* <p>
* If the passed {@link InputStream} is {@code null} {@link Flux
*
* @param inputStream The {@link InputStream} to convert into a {@link Flux}.
* @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream.
*/
public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream) {
    // Delegates with the default chunk size of 4096 bytes.
    return toFluxByteBuffer(inputStream, 4096);
}
/**
* Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer}.
* <p>
* Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered
* non-replayable as well.
* <p>
* If the passed {@link InputStream} is {@code null} {@link Flux
*
* @param inputStream The {@link InputStream} to convert into a {@link Flux}.
* @param chunkSize The requested size for each {@link ByteBuffer}.
* @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream.
* @throws IllegalArgumentException If {@code chunkSize} is less than or equal to {@code 0}.
*/
public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream, int chunkSize) {
    // Argument problems are surfaced through the Flux error channel, not thrown synchronously.
    if (chunkSize <= 0) {
        return Flux.error(new IllegalArgumentException("'chunkSize' must be greater than 0."));
    }
    if (inputStream == null) {
        return Flux.empty();
    }
    if (inputStream instanceof FileInputStream) {
        // Fast path for files: emit read-only memory-mapped chunks instead of copying
        // through an intermediate byte[].
        FileChannel fileChannel = ((FileInputStream) inputStream).getChannel();
        return Flux.<ByteBuffer, FileChannel>generate(() -> fileChannel, (channel, sink) -> {
            try {
                long channelPosition = channel.position();
                long channelSize = channel.size();
                if (channelPosition == channelSize) {
                    // Reached EOF: close the channel and complete the Flux.
                    channel.close();
                    sink.complete();
                } else {
                    int nextByteBufferSize = (int) Math.min(chunkSize, channelSize - channelPosition);
                    sink.next(channel.map(FileChannel.MapMode.READ_ONLY, channelPosition, nextByteBufferSize));
                    channel.position(channelPosition + nextByteBufferSize);
                }
            } catch (IOException ex) {
                // NOTE(review): the channel is not closed on this error path — confirm intended.
                sink.error(ex);
            }
            return channel;
        });
    }
    return Flux.<ByteBuffer, InputStream>generate(() -> inputStream, (stream, sink) -> {
        byte[] buffer = new byte[chunkSize];
        try {
            int offset = 0;
            // Fill the chunk completely unless EOF arrives first; read() may return short counts.
            while (offset < chunkSize) {
                int readCount = inputStream.read(buffer, offset, chunkSize - offset);
                if (readCount == -1) {
                    // EOF: emit any partial chunk, then complete.
                    if (offset > 0) {
                        sink.next(ByteBuffer.wrap(buffer, 0, offset));
                    }
                    sink.complete();
                    return stream;
                }
                offset += readCount;
            }
            sink.next(ByteBuffer.wrap(buffer));
        } catch (IOException ex) {
            sink.error(ex);
        }
        return stream;
    }).filter(ByteBuffer::hasRemaining); // drop empty buffers (e.g. zero-byte stream)
}
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a
* single entity of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* <p><strong>Code samples</strong></p>
* <!-- src_embed com.azure.core.implementation.util.fluxutil.withcontext -->
* <pre>
* String prefix = "Hello, ";
* Mono<String> response = FluxUtil
* .withContext&
* </pre>
* <!-- end com.azure.core.implementation.util.fluxutil.withcontext -->
*
* @param serviceCall The lambda function that makes the service call into which azure context will be passed
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall) {
    // Delegates with no extra attributes to add to the context.
    return withContext(serviceCall, Collections.emptyMap());
}
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context}, adds the specified context attributes and calls the given lambda
* function with this context and returns a single entity of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* @param serviceCall serviceCall The lambda function that makes the service call into which azure context will be
* passed
* @param contextAttributes The map of attributes sent by the calling method to be set on {@link Context}.
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall,
    Map<String, String> contextAttributes) {
    return Mono.deferContextual(context -> {
        // Single-element array lets the lambdas below rebuild the immutable Context chain.
        final Context[] azureContext = new Context[]{Context.NONE};
        if (!CoreUtils.isNullOrEmpty(contextAttributes)) {
            contextAttributes.forEach((key, value) -> azureContext[0] = azureContext[0].addData(key, value));
        }
        // Reactor context entries are appended after the explicit attributes.
        if (!context.isEmpty()) {
            context.stream().forEach(entry ->
                azureContext[0] = azureContext[0].addData(entry.getKey(), entry.getValue()));
        }
        return serviceCall.apply(azureContext[0]);
    });
}
/**
* Converts the incoming content to Mono.
*
* @param <T> The type of the Response, which will be returned in the Mono.
* @param response whose {@link Response
* @return The converted {@link Mono}
*/
public static <T> Mono<T> toMono(Response<T> response) {
    // Fail fast with a descriptive message instead of an anonymous NPE from
    // response.getValue(); consistent with the null validation used elsewhere in this class.
    Objects.requireNonNull(response, "'response' cannot be null.");
    // An absent value completes the Mono empty rather than emitting null.
    return Mono.justOrEmpty(response.getValue());
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Mono}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Mono} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> Mono<T> monoError(ClientLogger logger, RuntimeException ex) {
    // Log at error level, then emit the propagated exception through the Mono error channel.
    return Mono.error(logger.logExceptionAsError(Exceptions.propagate(ex)));
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Mono}.
*
* @param logBuilder The {@link LoggingEventBuilder} with context to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Mono} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> Mono<T> monoError(LoggingEventBuilder logBuilder, RuntimeException ex) {
return Mono.error(logBuilder.log(Exceptions.propagate(ex)));
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Flux}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Flux} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> Flux<T> fluxError(ClientLogger logger, RuntimeException ex) {
return Flux.error(logger.logExceptionAsError(Exceptions.propagate(ex)));
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link PagedFlux}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link PagedFlux} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> PagedFlux<T> pagedFluxError(ClientLogger logger, RuntimeException ex) {
return new PagedFlux<>(() -> monoError(logger, ex));
}
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a
* collection of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* <p><strong>Code samples</strong></p>
* <!-- src_embed com.azure.core.implementation.util.fluxutil.fluxcontext -->
* <pre>
* String prefix = "Hello, ";
* Flux<String> response = FluxUtil
* .fluxContext&
* </pre>
* <!-- end com.azure.core.implementation.util.fluxutil.fluxcontext -->
*
* @param serviceCall The lambda function that makes the service call into which the context will be passed
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
public static <T> Flux<T> fluxContext(Function<Context, Flux<T>> serviceCall) {
return Flux.deferContextual(context -> serviceCall.apply(toAzureContext(context)));
}
/**
* Converts a reactor context to azure context. If the reactor context is {@code null} or empty, {@link
* Context
*
* @param context The reactor context
* @return The azure context
*/
private static Context toAzureContext(ContextView context) {
    // Fold every Reactor context entry into an immutable azure-core Context chain.
    // The one-element array works around lambdas requiring effectively-final captures;
    // an empty stream simply leaves Context.NONE untouched.
    final Context[] chain = {Context.NONE};
    context.stream().forEach(entry -> chain[0] = chain[0].addData(entry.getKey(), entry.getValue()));
    return chain[0];
}
/**
* Converts an Azure context to Reactor context. If the Azure context is {@code null} or empty, {@link
* reactor.util.context.Context
*
* @param context The Azure context.
* @return The Reactor context.
*/
public static reactor.util.context.Context toReactorContext(Context context) {
    if (context == null) {
        return reactor.util.context.Context.empty();
    }
    reactor.util.context.Context returnContext = reactor.util.context.Context.empty();
    // Walk the whole Context chain; Reactor's Context is null-hostile, so entries
    // with a null value (or null links) are skipped.
    Context[] contextChain = context.getContextChain();
    for (Context toAdd : contextChain) {
        if (toAdd == null || toAdd.getValue() == null) {
            continue;
        }
        returnContext = returnContext.put(toAdd.getKey(), toAdd.getValue());
    }
    return returnContext;
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* OutputStream}.
* <p>
* The {@code stream} is not closed by this call, closing of the {@code stream} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code stream} are null. Additionally, an
* error will be emitted if an exception occurs while writing the {@code content} to the {@code stream}.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param stream The {@link OutputStream} being written into.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* OutputStream}, or an error status if writing fails.
*/
public static Mono<Void> writeToOutputStream(Flux<ByteBuffer> content, OutputStream stream) {
    // Null arguments surface as Mono errors instead of synchronous NPEs.
    if (content == null && stream == null) {
        return monoError(LOGGER, new NullPointerException("'content' and 'stream' cannot be null."));
    } else if (content == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    } else if (stream == null) {
        return monoError(LOGGER, new NullPointerException("'stream' cannot be null."));
    }
    // The subscriber consumes the content and signals 'emitter' on completion or error.
    return Mono.create(emitter -> content.subscribe(new OutputStreamWriteSubscriber(emitter, stream, LOGGER)));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousFileChannel}.
* <p>
* The {@code outFile} is not closed by this call, closing of the {@code outFile} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code outFile} are null. Additionally, an
* error will be emitted if the {@code outFile} wasn't opened with the proper open options, such as {@link
* StandardOpenOption
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param outFile The {@link AsynchronousFileChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousFileChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code outFile} is null.
*/
public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile) {
return writeFile(content, outFile, 0);
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousFileChannel} starting at the given {@code position} in the file.
* <p>
* The {@code outFile} is not closed by this call, closing of the {@code outFile} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code outFile} are null or {@code position}
* is less than 0. Additionally, an error will be emitted if the {@code outFile} wasn't opened with the proper open
* options, such as {@link StandardOpenOption
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param outFile The {@link AsynchronousFileChannel}.
* @param position The position in the file to begin writing the {@code content}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousFileChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code outFile} is null.
* @throws IllegalArgumentException When {@code position} is negative.
*/
public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile, long position) {
    // Argument problems surface as Mono errors instead of synchronous throws.
    if (content == null && outFile == null) {
        return monoError(LOGGER, new NullPointerException("'content' and 'outFile' cannot be null."));
    } else if (content == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    } else if (outFile == null) {
        return monoError(LOGGER, new NullPointerException("'outFile' cannot be null."));
    } else if (position < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'position' cannot be less than 0."));
    }
    // Adapt the file channel to a byte channel anchored at 'position' and delegate the write.
    return writeToAsynchronousByteChannel(content, IOUtils.toAsynchronousByteChannel(outFile, position));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousByteChannel}.
* <p>
* The {@code channel} is not closed by this call, closing of the {@code channel} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code channel} are null.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param channel The {@link AsynchronousByteChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousByteChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code channel} is null.
*/
public static Mono<Void> writeToAsynchronousByteChannel(Flux<ByteBuffer> content, AsynchronousByteChannel channel) {
    // Null arguments surface as Mono errors instead of synchronous NPEs.
    if (content == null && channel == null) {
        return monoError(LOGGER, new NullPointerException("'content' and 'channel' cannot be null."));
    } else if (content == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    } else if (channel == null) {
        return monoError(LOGGER, new NullPointerException("'channel' cannot be null."));
    }
    // The subscriber performs the asynchronous writes and signals 'emitter' when done.
    return Mono.create(emitter -> content.subscribe(
        new AsynchronousByteChannelWriteSubscriber(channel, emitter)));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* WritableByteChannel}.
* <p>
* The {@code channel} is not closed by this call, closing of the {@code channel} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code channel} are null.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param channel The {@link WritableByteChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* WritableByteChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code channel} is null.
*/
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file into chunks of the
* given size.
*
* @param fileChannel The file channel.
* @param chunkSize the size of file chunks to read.
* @param offset The offset in the file to begin reading.
* @param length The number of bytes to read from the file.
* @return the Flux.
*/
public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, int chunkSize, long offset,
long length) {
return new FileReadFlux(fileChannel, chunkSize, offset, length);
}
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file.
*
* @param fileChannel The file channel.
* @param offset The offset in the file to begin reading.
* @param length The number of bytes to read from the file.
* @return the Flux.
*/
public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, long offset, long length) {
return readFile(fileChannel, DEFAULT_CHUNK_SIZE, offset, length);
}
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads the entire file.
*
* @param fileChannel The file channel.
* @return The AsyncInputStream.
*/
public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel) {
try {
long size = fileChannel.size();
return readFile(fileChannel, DEFAULT_CHUNK_SIZE, 0, size);
} catch (IOException e) {
return Flux.error(new RuntimeException("Failed to read the file.", e));
}
}
private static final int DEFAULT_CHUNK_SIZE = 1024 * 64;
private static final class FileReadFlux extends Flux<ByteBuffer> {
private final AsynchronousFileChannel fileChannel;
private final int chunkSize;
private final long offset;
private final long length;
FileReadFlux(AsynchronousFileChannel fileChannel, int chunkSize, long offset, long length) {
this.fileChannel = fileChannel;
this.chunkSize = chunkSize;
this.offset = offset;
this.length = length;
}
@Override
public void subscribe(CoreSubscriber<? super ByteBuffer> actual) {
FileReadSubscription subscription =
new FileReadSubscription(actual, fileChannel, chunkSize, offset, length);
actual.onSubscribe(subscription);
}
static final class FileReadSubscription implements Subscription, CompletionHandler<Integer, ByteBuffer> {
private static final int NOT_SET = -1;
private static final long serialVersionUID = -6831808726875304256L;
private final Subscriber<? super ByteBuffer> subscriber;
private volatile long position;
private final AsynchronousFileChannel fileChannel;
private final int chunkSize;
private final long offset;
private final long length;
private volatile boolean done;
private Throwable error;
private volatile ByteBuffer next;
private volatile boolean cancelled;
volatile int wip;
static final AtomicIntegerFieldUpdater<FileReadSubscription> WIP =
AtomicIntegerFieldUpdater.newUpdater(FileReadSubscription.class, "wip");
volatile long requested;
static final AtomicLongFieldUpdater<FileReadSubscription> REQUESTED =
AtomicLongFieldUpdater.newUpdater(FileReadSubscription.class, "requested");
FileReadSubscription(Subscriber<? super ByteBuffer> subscriber, AsynchronousFileChannel fileChannel,
int chunkSize, long offset, long length) {
this.subscriber = subscriber;
this.fileChannel = fileChannel;
this.chunkSize = chunkSize;
this.offset = offset;
this.length = length;
this.position = NOT_SET;
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
Operators.addCap(REQUESTED, this, n);
drain();
}
}
@Override
public void cancel() {
this.cancelled = true;
}
@Override
public void completed(Integer bytesRead, ByteBuffer buffer) {
if (!cancelled) {
if (bytesRead == -1) {
done = true;
} else {
long pos = position;
int bytesWanted = Math.min(bytesRead, maxRequired(pos));
long position2 = pos + bytesWanted;
position = position2;
buffer.position(bytesWanted);
buffer.flip();
next = buffer;
if (position2 >= offset + length) {
done = true;
}
}
drain();
}
}
@Override
public void failed(Throwable exc, ByteBuffer attachment) {
if (!cancelled) {
error = exc;
done = true;
drain();
}
}
private void drain() {
if (WIP.getAndIncrement(this) != 0) {
return;
}
if (position == NOT_SET) {
position = offset;
doRead();
}
int missed = 1;
while (true) {
if (cancelled) {
return;
}
if (REQUESTED.get(this) > 0) {
boolean emitted = false;
boolean d = done;
ByteBuffer bb = next;
if (bb != null) {
next = null;
subscriber.onNext(bb);
emitted = true;
}
if (d) {
if (error != null) {
subscriber.onError(error);
} else {
subscriber.onComplete();
}
return;
}
if (emitted) {
Operators.produced(REQUESTED, this, 1);
doRead();
}
}
missed = WIP.addAndGet(this, -missed);
if (missed == 0) {
return;
}
}
}
private void doRead() {
long pos = position;
ByteBuffer innerBuf = ByteBuffer.allocate(Math.min(chunkSize, maxRequired(pos)));
fileChannel.read(innerBuf, pos, innerBuf, this);
}
private int maxRequired(long pos) {
long maxRequired = offset + length - pos;
if (maxRequired <= 0) {
return 0;
} else {
int m = (int) (maxRequired);
if (m < 0) {
return Integer.MAX_VALUE;
} else {
return m;
}
}
}
}
}
    // Private constructor: static utility class, must not be instantiated.
    private FluxUtil() {
    }
} | class FluxUtil {
    // Shared empty result used when a response reports Content-Length <= 0.
    private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
    private static final ClientLogger LOGGER = new ClientLogger(FluxUtil.class);
/**
* Checks if a type is Flux<ByteBuffer>.
*
* @param entityType the type to check
* @return whether the type represents a Flux that emits ByteBuffer
*/
public static boolean isFluxByteBuffer(Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Flux.class)) {
final Type innerType = TypeUtil.getTypeArguments(entityType)[0];
return TypeUtil.isTypeOrSubTypeOf(innerType, ByteBuffer.class);
}
return false;
}
/**
* Collects ByteBuffers emitted by a Flux into a byte array.
*
* @param stream A stream which emits ByteBuffer instances.
* @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux.
* @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link
* Integer
*/
    public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream) {
        // A mutable ByteBufferCollector accumulates every emitted buffer, then is converted once on completion.
        return stream.collect(ByteBufferCollector::new, ByteBufferCollector::write)
            .map(ByteBufferCollector::toByteArray);
    }
/**
* Collects ByteBuffers emitted by a Flux into a byte array.
* <p>
* Unlike {@link
* This size hint allows for optimizations when creating the initial buffer to reduce the number of times it needs
* to be resized while concatenating emitted ByteBuffers.
*
* @param stream A stream which emits ByteBuffer instances.
* @param sizeHint A hint about the expected stream size.
* @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux.
* @throws IllegalArgumentException If {@code sizeHint} is equal to or less than {@code 0}.
* @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link
* Integer
*/
    public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream, int sizeHint) {
        // The size hint pre-sizes the collector's buffer to reduce resizing while concatenating.
        return stream.collect(() -> new ByteBufferCollector(sizeHint), ByteBufferCollector::write)
            .map(ByteBufferCollector::toByteArray);
    }
/**
* Collects ByteBuffers returned in a network response into a byte array.
* <p>
* The {@code headers} are inspected for containing an {@code Content-Length} which determines if a size hinted
* collection, {@link
*
*
* @param stream A network response ByteBuffer stream.
* @param headers The HTTP headers of the response.
* @return A Mono which emits the collected network response ByteBuffers.
* @throws NullPointerException If {@code headers} is null.
* @throws IllegalStateException If the size of the network response is greater than {@link Integer
*/
public static Mono<byte[]> collectBytesFromNetworkResponse(Flux<ByteBuffer> stream, HttpHeaders headers) {
Objects.requireNonNull(headers, "'headers' cannot be null.");
String contentLengthHeader = headers.getValue("Content-Length");
if (contentLengthHeader == null) {
return FluxUtil.collectBytesInByteBufferStream(stream);
} else {
try {
int contentLength = Integer.parseInt(contentLengthHeader);
if (contentLength > 0) {
return FluxUtil.collectBytesInByteBufferStream(stream, contentLength);
} else {
return Mono.just(EMPTY_BYTE_ARRAY);
}
} catch (NumberFormatException ex) {
return FluxUtil.collectBytesInByteBufferStream(stream);
}
}
}
/**
* Gets the content of the provided ByteBuffer as a byte array. This method will create a new byte array even if the
* ByteBuffer can have optionally backing array.
*
* @param byteBuffer the byte buffer
* @return the byte array
*/
public static byte[] byteBufferToArray(ByteBuffer byteBuffer) {
int length = byteBuffer.remaining();
byte[] byteArray = new byte[length];
byteBuffer.get(byteArray);
return byteArray;
}
/**
* Creates a {@link Flux} that is capable of resuming a download by applying retry logic when an error occurs.
*
* @param downloadSupplier Supplier of the initial download.
* @param onDownloadErrorResume {@link BiFunction} of {@link Throwable} and {@link Long} which is used to resume
* downloading when an error occurs.
* @param maxRetries The maximum number of times a download can be resumed when an error occurs.
* @return A {@link Flux} that downloads reliably.
*/
    public static Flux<ByteBuffer> createRetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier,
        BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries) {
        // Starts the download at offset 0.
        return createRetriableDownloadFlux(downloadSupplier, onDownloadErrorResume, maxRetries, 0L);
    }
/**
* Creates a {@link Flux} that is capable of resuming a download by applying retry logic when an error occurs.
*
* @param downloadSupplier Supplier of the initial download.
* @param onDownloadErrorResume {@link BiFunction} of {@link Throwable} and {@link Long} which is used to resume
* downloading when an error occurs.
* @param maxRetries The maximum number of times a download can be resumed when an error occurs.
* @param position The initial offset for the download.
* @return A {@link Flux} that downloads reliably.
*/
    public static Flux<ByteBuffer> createRetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier,
        BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries, long position) {
        // Retry/resume semantics live in RetriableDownloadFlux; presumably it resumes via onDownloadErrorResume
        // up to maxRetries times — see that class for the exact contract.
        return new RetriableDownloadFlux(downloadSupplier, onDownloadErrorResume, maxRetries, position);
    }
/**
* Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer} using a chunk size of 4096.
* <p>
* Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered
* non-replayable as well.
* <p>
* If the passed {@link InputStream} is {@code null} {@link Flux
*
* @param inputStream The {@link InputStream} to convert into a {@link Flux}.
* @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream.
*/
    public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream) {
        // Default chunk size of 4096 bytes per emitted ByteBuffer.
        return toFluxByteBuffer(inputStream, 4096);
    }
/**
* Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer}.
* <p>
* Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered
* non-replayable as well.
* <p>
* If the passed {@link InputStream} is {@code null} {@link Flux
*
* @param inputStream The {@link InputStream} to convert into a {@link Flux}.
* @param chunkSize The requested size for each {@link ByteBuffer}.
* @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream.
* @throws IllegalArgumentException If {@code chunkSize} is less than or equal to {@code 0}.
*/
public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream, int chunkSize) {
if (chunkSize <= 0) {
return Flux.error(new IllegalArgumentException("'chunkSize' must be greater than 0."));
}
if (inputStream == null) {
return Flux.empty();
}
if (inputStream instanceof FileInputStream) {
FileChannel fileChannel = ((FileInputStream) inputStream).getChannel();
return Flux.<ByteBuffer, FileChannel>generate(() -> fileChannel, (channel, sink) -> {
try {
long channelPosition = channel.position();
long channelSize = channel.size();
if (channelPosition == channelSize) {
channel.close();
sink.complete();
} else {
int nextByteBufferSize = (int) Math.min(chunkSize, channelSize - channelPosition);
sink.next(channel.map(FileChannel.MapMode.READ_ONLY, channelPosition, nextByteBufferSize));
channel.position(channelPosition + nextByteBufferSize);
}
} catch (IOException ex) {
sink.error(ex);
}
return channel;
});
}
return Flux.<ByteBuffer, InputStream>generate(() -> inputStream, (stream, sink) -> {
byte[] buffer = new byte[chunkSize];
try {
int offset = 0;
while (offset < chunkSize) {
int readCount = inputStream.read(buffer, offset, chunkSize - offset);
if (readCount == -1) {
if (offset > 0) {
sink.next(ByteBuffer.wrap(buffer, 0, offset));
}
sink.complete();
return stream;
}
offset += readCount;
}
sink.next(ByteBuffer.wrap(buffer));
} catch (IOException ex) {
sink.error(ex);
}
return stream;
}).filter(ByteBuffer::hasRemaining);
}
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a
* single entity of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* <p><strong>Code samples</strong></p>
* <!-- src_embed com.azure.core.implementation.util.fluxutil.withcontext -->
* <pre>
* String prefix = "Hello, ";
* Mono<String> response = FluxUtil
* .withContext&
* </pre>
* <!-- end com.azure.core.implementation.util.fluxutil.withcontext -->
*
* @param serviceCall The lambda function that makes the service call into which azure context will be passed
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
    public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall) {
        // No extra attributes; the Azure Context is built purely from the Reactor subscriber context.
        return withContext(serviceCall, Collections.emptyMap());
    }
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context}, adds the specified context attributes and calls the given lambda
* function with this context and returns a single entity of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* @param serviceCall serviceCall The lambda function that makes the service call into which azure context will be
* passed
* @param contextAttributes The map of attributes sent by the calling method to be set on {@link Context}.
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall,
Map<String, String> contextAttributes) {
return Mono.deferContextual(context -> {
final Context[] azureContext = new Context[]{Context.NONE};
if (!CoreUtils.isNullOrEmpty(contextAttributes)) {
contextAttributes.forEach((key, value) -> azureContext[0] = azureContext[0].addData(key, value));
}
if (!context.isEmpty()) {
context.stream().forEach(entry ->
azureContext[0] = azureContext[0].addData(entry.getKey(), entry.getValue()));
}
return serviceCall.apply(azureContext[0]);
});
}
/**
* Converts the incoming content to Mono.
*
* @param <T> The type of the Response, which will be returned in the Mono.
* @param response whose {@link Response
* @return The converted {@link Mono}
*/
    public static <T> Mono<T> toMono(Response<T> response) {
        // Empty Mono when the response value is null, otherwise a Mono of the value.
        return Mono.justOrEmpty(response.getValue());
    }
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Mono}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Mono} that terminates with error wrapping the {@link RuntimeException}.
*/
    public static <T> Mono<T> monoError(ClientLogger logger, RuntimeException ex) {
        // Log the exception at error level before propagating it through the Mono error channel.
        return Mono.error(logger.logExceptionAsError(Exceptions.propagate(ex)));
    }
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Mono}.
*
* @param logBuilder The {@link LoggingEventBuilder} with context to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Mono} that terminates with error wrapping the {@link RuntimeException}.
*/
    public static <T> Mono<T> monoError(LoggingEventBuilder logBuilder, RuntimeException ex) {
        // Log with the contextual builder before propagating through the Mono error channel.
        return Mono.error(logBuilder.log(Exceptions.propagate(ex)));
    }
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Flux}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Flux} that terminates with error wrapping the {@link RuntimeException}.
*/
    public static <T> Flux<T> fluxError(ClientLogger logger, RuntimeException ex) {
        // Log the exception at error level before propagating it through the Flux error channel.
        return Flux.error(logger.logExceptionAsError(Exceptions.propagate(ex)));
    }
/**
* Propagates a {@link RuntimeException} through the error channel of {@link PagedFlux}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link PagedFlux} that terminates with error wrapping the {@link RuntimeException}.
*/
    public static <T> PagedFlux<T> pagedFluxError(ClientLogger logger, RuntimeException ex) {
        // The PagedFlux produces the logged error lazily, when its first page is requested.
        return new PagedFlux<>(() -> monoError(logger, ex));
    }
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a
* collection of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* <p><strong>Code samples</strong></p>
* <!-- src_embed com.azure.core.implementation.util.fluxutil.fluxcontext -->
* <pre>
* String prefix = "Hello, ";
* Flux<String> response = FluxUtil
* .fluxContext&
* </pre>
* <!-- end com.azure.core.implementation.util.fluxutil.fluxcontext -->
*
* @param serviceCall The lambda function that makes the service call into which the context will be passed
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
    public static <T> Flux<T> fluxContext(Function<Context, Flux<T>> serviceCall) {
        // Defer so the subscriber's Reactor context is captured at subscription time.
        return Flux.deferContextual(context -> serviceCall.apply(toAzureContext(context)));
    }
/**
* Converts a reactor context to azure context. If the reactor context is {@code null} or empty, {@link
* Context
*
* @param context The reactor context
* @return The azure context
*/
private static Context toAzureContext(ContextView context) {
final Context[] azureContext = new Context[]{Context.NONE};
if (!context.isEmpty()) {
context.stream().forEach(entry ->
azureContext[0] = azureContext[0].addData(entry.getKey(), entry.getValue()));
}
return azureContext[0];
}
/**
* Converts an Azure context to Reactor context. If the Azure context is {@code null} or empty, {@link
* reactor.util.context.Context
*
* @param context The Azure context.
* @return The Reactor context.
*/
public static reactor.util.context.Context toReactorContext(Context context) {
if (context == null) {
return reactor.util.context.Context.empty();
}
reactor.util.context.Context returnContext = reactor.util.context.Context.empty();
Context[] contextChain = context.getContextChain();
for (Context toAdd : contextChain) {
if (toAdd == null || toAdd.getValue() == null) {
continue;
}
returnContext = returnContext.put(toAdd.getKey(), toAdd.getValue());
}
return returnContext;
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* OutputStream}.
* <p>
* The {@code stream} is not closed by this call, closing of the {@code stream} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code stream} are null. Additionally, an
* error will be emitted if an exception occurs while writing the {@code content} to the {@code stream}.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param stream The {@link OutputStream} being written into.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* OutputStream}, or an error status if writing fails.
*/
public static Mono<Void> writeToOutputStream(Flux<ByteBuffer> content, OutputStream stream) {
if (content == null && stream == null) {
return monoError(LOGGER, new NullPointerException("'content' and 'stream' cannot be null."));
} else if (content == null) {
return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
} else if (stream == null) {
return monoError(LOGGER, new NullPointerException("'stream' cannot be null."));
}
return Mono.create(emitter -> content.subscribe(new OutputStreamWriteSubscriber(emitter, stream, LOGGER)));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousFileChannel}.
* <p>
* The {@code outFile} is not closed by this call, closing of the {@code outFile} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code outFile} are null. Additionally, an
* error will be emitted if the {@code outFile} wasn't opened with the proper open options, such as {@link
* StandardOpenOption
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param outFile The {@link AsynchronousFileChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousFileChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code outFile} is null.
*/
    public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile) {
        // Write starting at the beginning of the file.
        return writeFile(content, outFile, 0);
    }
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousFileChannel} starting at the given {@code position} in the file.
* <p>
* The {@code outFile} is not closed by this call, closing of the {@code outFile} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code outFile} are null or {@code position}
* is less than 0. Additionally, an error will be emitted if the {@code outFile} wasn't opened with the proper open
* options, such as {@link StandardOpenOption
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param outFile The {@link AsynchronousFileChannel}.
* @param position The position in the file to begin writing the {@code content}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousFileChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code outFile} is null.
* @throws IllegalArgumentException When {@code position} is negative.
*/
public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile, long position) {
if (content == null && outFile == null) {
return monoError(LOGGER, new NullPointerException("'content' and 'outFile' cannot be null."));
} else if (content == null) {
return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
} else if (outFile == null) {
return monoError(LOGGER, new NullPointerException("'outFile' cannot be null."));
} else if (position < 0) {
return monoError(LOGGER, new IllegalArgumentException("'position' cannot be less than 0."));
}
return writeToAsynchronousByteChannel(content, IOUtils.toAsynchronousByteChannel(outFile, position));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousByteChannel}.
* <p>
* The {@code channel} is not closed by this call, closing of the {@code channel} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code channel} are null.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param channel The {@link AsynchronousByteChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousByteChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code channel} is null.
*/
public static Mono<Void> writeToAsynchronousByteChannel(Flux<ByteBuffer> content, AsynchronousByteChannel channel) {
if (content == null && channel == null) {
return monoError(LOGGER, new NullPointerException("'content' and 'channel' cannot be null."));
} else if (content == null) {
return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
} else if (channel == null) {
return monoError(LOGGER, new NullPointerException("'channel' cannot be null."));
}
return Mono.create(emitter -> content.subscribe(
new AsynchronousByteChannelWriteSubscriber(channel, emitter)));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* WritableByteChannel}.
* <p>
* The {@code channel} is not closed by this call, closing of the {@code channel} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code channel} are null.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param channel The {@link WritableByteChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* WritableByteChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code channel} is null.
*/
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file into chunks of the
* given size.
*
* @param fileChannel The file channel.
* @param chunkSize the size of file chunks to read.
* @param offset The offset in the file to begin reading.
* @param length The number of bytes to read from the file.
* @return the Flux.
*/
    public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, int chunkSize, long offset,
        long length) {
        // Lazily wraps the channel; no read is issued until the returned Flux is subscribed to and demand arrives.
        return new FileReadFlux(fileChannel, chunkSize, offset, length);
    }
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file.
*
* @param fileChannel The file channel.
* @param offset The offset in the file to begin reading.
* @param length The number of bytes to read from the file.
* @return the Flux.
*/
    public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, long offset, long length) {
        // Delegates using the default 64 KB chunk size (DEFAULT_CHUNK_SIZE).
        return readFile(fileChannel, DEFAULT_CHUNK_SIZE, offset, length);
    }
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads the entire file.
*
* @param fileChannel The file channel.
* @return The AsyncInputStream.
*/
public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel) {
try {
long size = fileChannel.size();
return readFile(fileChannel, DEFAULT_CHUNK_SIZE, 0, size);
} catch (IOException e) {
return Flux.error(new RuntimeException("Failed to read the file.", e));
}
}
private static final int DEFAULT_CHUNK_SIZE = 1024 * 64;
private static final class FileReadFlux extends Flux<ByteBuffer> {
private final AsynchronousFileChannel fileChannel;
private final int chunkSize;
private final long offset;
private final long length;
FileReadFlux(AsynchronousFileChannel fileChannel, int chunkSize, long offset, long length) {
this.fileChannel = fileChannel;
this.chunkSize = chunkSize;
this.offset = offset;
this.length = length;
}
@Override
public void subscribe(CoreSubscriber<? super ByteBuffer> actual) {
FileReadSubscription subscription =
new FileReadSubscription(actual, fileChannel, chunkSize, offset, length);
actual.onSubscribe(subscription);
}
static final class FileReadSubscription implements Subscription, CompletionHandler<Integer, ByteBuffer> {
private static final int NOT_SET = -1;
private static final long serialVersionUID = -6831808726875304256L;
private final Subscriber<? super ByteBuffer> subscriber;
private volatile long position;
private final AsynchronousFileChannel fileChannel;
private final int chunkSize;
private final long offset;
private final long length;
private volatile boolean done;
private Throwable error;
private volatile ByteBuffer next;
private volatile boolean cancelled;
volatile int wip;
static final AtomicIntegerFieldUpdater<FileReadSubscription> WIP =
AtomicIntegerFieldUpdater.newUpdater(FileReadSubscription.class, "wip");
volatile long requested;
static final AtomicLongFieldUpdater<FileReadSubscription> REQUESTED =
AtomicLongFieldUpdater.newUpdater(FileReadSubscription.class, "requested");
FileReadSubscription(Subscriber<? super ByteBuffer> subscriber, AsynchronousFileChannel fileChannel,
int chunkSize, long offset, long length) {
this.subscriber = subscriber;
this.fileChannel = fileChannel;
this.chunkSize = chunkSize;
this.offset = offset;
this.length = length;
this.position = NOT_SET;
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
Operators.addCap(REQUESTED, this, n);
drain();
}
}
@Override
public void cancel() {
this.cancelled = true;
}
@Override
public void completed(Integer bytesRead, ByteBuffer buffer) {
if (!cancelled) {
if (bytesRead == -1) {
done = true;
} else {
long pos = position;
int bytesWanted = Math.min(bytesRead, maxRequired(pos));
long position2 = pos + bytesWanted;
position = position2;
buffer.position(bytesWanted);
buffer.flip();
next = buffer;
if (position2 >= offset + length) {
done = true;
}
}
drain();
}
}
@Override
public void failed(Throwable exc, ByteBuffer attachment) {
if (!cancelled) {
error = exc;
done = true;
drain();
}
}
private void drain() {
if (WIP.getAndIncrement(this) != 0) {
return;
}
if (position == NOT_SET) {
position = offset;
doRead();
}
int missed = 1;
while (true) {
if (cancelled) {
return;
}
if (REQUESTED.get(this) > 0) {
boolean emitted = false;
boolean d = done;
ByteBuffer bb = next;
if (bb != null) {
next = null;
subscriber.onNext(bb);
emitted = true;
}
if (d) {
if (error != null) {
subscriber.onError(error);
} else {
subscriber.onComplete();
}
return;
}
if (emitted) {
Operators.produced(REQUESTED, this, 1);
doRead();
}
}
missed = WIP.addAndGet(this, -missed);
if (missed == 0) {
return;
}
}
}
private void doRead() {
long pos = position;
ByteBuffer innerBuf = ByteBuffer.allocate(Math.min(chunkSize, maxRequired(pos)));
fileChannel.read(innerBuf, pos, innerBuf, this);
}
private int maxRequired(long pos) {
long maxRequired = offset + length - pos;
if (maxRequired <= 0) {
return 0;
} else {
int m = (int) (maxRequired);
if (m < 0) {
return Integer.MAX_VALUE;
} else {
return m;
}
}
}
}
}
    // Private constructor: static utility class, must not be instantiated.
    private FluxUtil() {
    }
} |
Does this need to be checked here or can this be done by the `FluxUtil` method? | public void transferBodyTo(WritableByteChannel channel) throws IOException {
Objects.requireNonNull(channel, "'channel' must not be null");
Flux<ByteBuffer> body = getBody();
if (body != null) {
FluxUtil.writeToWritableByteChannel(body, channel).block();
}
} | Objects.requireNonNull(channel, "'channel' must not be null"); | public void transferBodyTo(WritableByteChannel channel) throws IOException {
Flux<ByteBuffer> body = getBody();
if (body != null) {
FluxUtil.writeToWritableByteChannel(body, channel).block();
}
} | class HttpResponse implements Closeable {
    // The request that produced this response; exposed via getRequest().
    private final HttpRequest request;
    /**
     * Creates an instance of {@link HttpResponse}.
     *
     * @param request The {@link HttpRequest} that resulted in this {@link HttpResponse}.
     */
    protected HttpResponse(HttpRequest request) {
        this.request = request;
    }
/**
* Get the response status code.
*
* @return The response status code
*/
public abstract int getStatusCode();
/**
* Lookup a response header with the provided name.
*
* @param name the name of the header to lookup.
* @return the value of the header, or null if the header doesn't exist in the response.
*/
public abstract String getHeaderValue(String name);
/**
* Get all response headers.
*
* @return the response headers
*/
public abstract HttpHeaders getHeaders();
/**
* Get the publisher emitting response content chunks.
* <p>
* Returns a stream of the response's body content. Emissions may occur on Reactor threads which should not be
* blocked. Blocking should be avoided as much as possible/practical in reactive programming but if you do use
* methods like {@code block()} on the stream then be sure to use {@code publishOn} before the blocking call.
*
* @return The response's content as a stream of {@link ByteBuffer}.
*/
public abstract Flux<ByteBuffer> getBody();
/**
* Gets the {@link BinaryData} that represents the body of the response.
*
* Subclasses should override this method.
*
* @return The {@link BinaryData} response body.
*/
public BinaryData getBodyAsBinaryData() {
Flux<ByteBuffer> body = getBody();
if (body != null) {
return BinaryDataHelper.createBinaryData(new FluxByteBufferContent(body));
} else {
return null;
}
}
/**
* Gets the response content as a {@code byte[]}.
*
* @return The response content as a {@code byte[]}.
*/
public abstract Mono<byte[]> getBodyAsByteArray();
/**
* Gets the response content as a {@link String}.
* <p>
* By default this method will inspect the response body for containing a byte order mark (BOM) to determine the
* encoding of the string (UTF-8, UTF-16, etc.). If a BOM isn't found this will default to using UTF-8 as the
* encoding, if a specific encoding is required use {@link
*
* @return The response content as a {@link String}.
*/
public abstract Mono<String> getBodyAsString();
/**
* Gets the response content as a {@link String}.
*
* @param charset The {@link Charset} to use as the string encoding.
* @return The response content as a {@link String}.
*/
public abstract Mono<String> getBodyAsString(Charset charset);
/**
* Gets the response content as an {@link InputStream}.
*
* @return The response content as an {@link InputStream}.
*/
public Mono<InputStream> getBodyAsInputStream() {
    // Materialize the full body into memory first, then expose it as a stream.
    return getBodyAsByteArray().map(bytes -> new ByteArrayInputStream(bytes));
}
/**
* Gets the {@link HttpRequest request} which resulted in this response.
*
* @return The {@link HttpRequest request} which resulted in this response.
*/
public final HttpRequest getRequest() {
    // The request reference is set once in the constructor and never changes.
    return request;
}
/**
* Gets a new {@link HttpResponse response} object wrapping this response with its content buffered into memory.
*
* @return A new {@link HttpResponse response} with the content buffered.
*/
public HttpResponse buffer() {
    // Buffering is delegated to BufferedHttpResponse, which wraps this response.
    return new BufferedHttpResponse(this);
}
/**
* Transfers body bytes to the {@link AsynchronousByteChannel}.
* @param channel The destination {@link AsynchronousByteChannel}.
* @return A {@link Mono} that completes when transfer is completed.
* @throws NullPointerException When {@code channel} is null.
*/
public Mono<Void> transferBodyToAsync(AsynchronousByteChannel channel) {
    Objects.requireNonNull(channel, "'channel' must not be null");
    // Nothing to transfer when there is no body stream.
    Flux<ByteBuffer> bodyFlux = getBody();
    if (bodyFlux == null) {
        return Mono.empty();
    }
    return FluxUtil.writeToAsynchronousByteChannel(bodyFlux, channel);
}
/**
* Transfers body bytes to the {@link WritableByteChannel}.
* @param channel The destination {@link WritableByteChannel}.
* @throws IOException When I/O operation fails.
* @throws NullPointerException When {@code channel} is null.
*/
/**
* Closes the response content stream, if any.
*/
@Override
public void close() {
    // Intentionally a no-op in the base class; subclasses backed by live
    // network/content streams may override to release those resources.
}
} | class HttpResponse implements Closeable {
private final HttpRequest request;
/**
* Creates an instance of {@link HttpResponse}.
*
* @param request The {@link HttpRequest} that resulted in this {@link HttpResponse}.
*/
protected HttpResponse(HttpRequest request) {
this.request = request;
}
/**
* Get the response status code.
*
* @return The response status code
*/
public abstract int getStatusCode();
/**
* Lookup a response header with the provided name.
*
* @param name the name of the header to lookup.
* @return the value of the header, or null if the header doesn't exist in the response.
*/
public abstract String getHeaderValue(String name);
/**
* Get all response headers.
*
* @return the response headers
*/
public abstract HttpHeaders getHeaders();
/**
* Get the publisher emitting response content chunks.
* <p>
* Returns a stream of the response's body content. Emissions may occur on Reactor threads which should not be
* blocked. Blocking should be avoided as much as possible/practical in reactive programming but if you do use
* methods like {@code block()} on the stream then be sure to use {@code publishOn} before the blocking call.
*
* @return The response's content as a stream of {@link ByteBuffer}.
*/
public abstract Flux<ByteBuffer> getBody();
/**
* Gets the {@link BinaryData} that represents the body of the response.
*
* Subclasses should override this method.
*
* @return The {@link BinaryData} response body.
*/
public BinaryData getBodyAsBinaryData() {
Flux<ByteBuffer> body = getBody();
if (body != null) {
return BinaryDataHelper.createBinaryData(new FluxByteBufferContent(body));
} else {
return null;
}
}
/**
* Gets the response content as a {@code byte[]}.
*
* @return The response content as a {@code byte[]}.
*/
public abstract Mono<byte[]> getBodyAsByteArray();
/**
* Gets the response content as a {@link String}.
* <p>
* By default this method will inspect the response body for containing a byte order mark (BOM) to determine the
* encoding of the string (UTF-8, UTF-16, etc.). If a BOM isn't found this will default to using UTF-8 as the
* encoding, if a specific encoding is required use {@link
*
* @return The response content as a {@link String}.
*/
public abstract Mono<String> getBodyAsString();
/**
* Gets the response content as a {@link String}.
*
* @param charset The {@link Charset} to use as the string encoding.
* @return The response content as a {@link String}.
*/
public abstract Mono<String> getBodyAsString(Charset charset);
/**
* Gets the response content as an {@link InputStream}.
*
* @return The response content as an {@link InputStream}.
*/
public Mono<InputStream> getBodyAsInputStream() {
return getBodyAsByteArray().map(ByteArrayInputStream::new);
}
/**
* Gets the {@link HttpRequest request} which resulted in this response.
*
* @return The {@link HttpRequest request} which resulted in this response.
*/
public final HttpRequest getRequest() {
return request;
}
/**
* Gets a new {@link HttpResponse response} object wrapping this response with its content buffered into memory.
*
* @return A new {@link HttpResponse response} with the content buffered.
*/
public HttpResponse buffer() {
return new BufferedHttpResponse(this);
}
/**
* Transfers body bytes to the {@link AsynchronousByteChannel}.
* @param channel The destination {@link AsynchronousByteChannel}.
* @return A {@link Mono} that completes when transfer is completed.
* @throws NullPointerException When {@code channel} is null.
*/
public Mono<Void> transferBodyToAsync(AsynchronousByteChannel channel) {
Objects.requireNonNull(channel, "'channel' must not be null");
Flux<ByteBuffer> body = getBody();
if (body != null) {
return FluxUtil.writeToAsynchronousByteChannel(body, channel);
} else {
return Mono.empty();
}
}
/**
* Transfers body bytes to the {@link WritableByteChannel}.
* @param channel The destination {@link WritableByteChannel}.
* @throws IOException When I/O operation fails.
* @throws NullPointerException When {@code channel} is null.
*/
/**
* Closes the response content stream, if any.
*/
@Override
public void close() {
}
} |
Doesn't this need to handle the case where the full buffer isn't written? | public void completed(Integer result, ByteBuffer attachment) {
try {
transferAsynchronously(source, destination, buffer, sink);
} catch (IOException e) {
sink.error(e);
}
} | transferAsynchronously(source, destination, buffer, sink); | public void completed(Integer result, ByteBuffer attachment) {
try {
if (buffer.hasRemaining()) {
destination.write(buffer, buffer, this);
} else {
transferAsynchronously(source, destination, buffer, sink);
}
} catch (IOException e) {
sink.error(e);
}
} | class IOUtils {
private static final ClientLogger LOGGER = new ClientLogger(IOUtils.class);
private static final int DEFAULT_BUFFER_SIZE = 8192;
/**
* Adapts {@link AsynchronousFileChannel} to {@link AsynchronousByteChannel}.
* @param fileChannel The {@link AsynchronousFileChannel}.
* @param position The position in the file to begin writing or reading the {@code content}.
* @return A {@link AsynchronousByteChannel} that delegates to {@code fileChannel}.
* @throws NullPointerException When {@code fileChannel} is null.
* @throws IllegalArgumentException When {@code position} is negative.
*/
public static AsynchronousByteChannel toAsynchronousByteChannel(
AsynchronousFileChannel fileChannel, long position) {
Objects.requireNonNull(fileChannel, "'fileChannel' must not be null");
if (position < 0) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'position' cannot be less than 0."));
}
return new AsynchronousFileChannelAdapter(fileChannel, position);
}
/**
* Transfers bytes from {@link ReadableByteChannel} to {@link WritableByteChannel}.
* @param source A source {@link ReadableByteChannel}.
* @param destination A destination {@link WritableByteChannel}.
* @throws IOException When I/O operation fails.
* @throws NullPointerException When {@code source} is null.
* @throws NullPointerException When {@code destination} is null.
*/
public static void transfer(ReadableByteChannel source, WritableByteChannel destination) throws IOException {
Objects.requireNonNull(source, "'source' must not be null");
Objects.requireNonNull(source, "'destination' must not be null");
ByteBuffer buffer = ByteBuffer.allocate(DEFAULT_BUFFER_SIZE);
int read;
do {
buffer.clear();
read = source.read(buffer);
buffer.flip();
while (buffer.hasRemaining()) {
destination.write(buffer);
}
} while (read >= 0);
}
/**
* Transfers bytes from {@link ReadableByteChannel} to {@link AsynchronousByteChannel}.
* @param source A source {@link ReadableByteChannel}.
* @param destination A destination {@link AsynchronousByteChannel}.
* @return A {@link Mono} that completes when transfer is finished.
* @throws NullPointerException When {@code source} is null.
* @throws NullPointerException When {@code destination} is null.
*/
public static Mono<Void> transferAsync(ReadableByteChannel source, AsynchronousByteChannel destination) {
Objects.requireNonNull(source, "'source' must not be null");
Objects.requireNonNull(source, "'destination' must not be null");
return Mono.create(sink -> sink.onRequest(value -> {
ByteBuffer buffer = ByteBuffer.allocate(DEFAULT_BUFFER_SIZE);
try {
transferAsynchronously(source, destination, buffer, sink);
} catch (IOException e) {
sink.error(e);
}
}));
}
private static void transferAsynchronously(
ReadableByteChannel source, AsynchronousByteChannel destination,
ByteBuffer buffer, MonoSink<Void> sink) throws IOException {
buffer.clear();
int read = source.read(buffer);
if (read >= 0) {
buffer.flip();
destination.write(buffer, buffer, new CompletionHandler<Integer, ByteBuffer>() {
@Override
@Override
public void failed(Throwable e, ByteBuffer attachment) {
sink.error(e);
}
});
} else {
sink.success();
}
}
} | class IOUtils {
private static final ClientLogger LOGGER = new ClientLogger(IOUtils.class);
private static final int DEFAULT_BUFFER_SIZE = 8192;
/**
* Adapts {@link AsynchronousFileChannel} to {@link AsynchronousByteChannel}.
* @param fileChannel The {@link AsynchronousFileChannel}.
* @param position The position in the file to begin writing or reading the {@code content}.
* @return A {@link AsynchronousByteChannel} that delegates to {@code fileChannel}.
* @throws NullPointerException When {@code fileChannel} is null.
* @throws IllegalArgumentException When {@code position} is negative.
*/
public static AsynchronousByteChannel toAsynchronousByteChannel(
AsynchronousFileChannel fileChannel, long position) {
Objects.requireNonNull(fileChannel, "'fileChannel' must not be null");
if (position < 0) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'position' cannot be less than 0."));
}
return new AsynchronousFileChannelAdapter(fileChannel, position);
}
/**
* Transfers bytes from {@link ReadableByteChannel} to {@link WritableByteChannel}.
* @param source A source {@link ReadableByteChannel}.
* @param destination A destination {@link WritableByteChannel}.
* @throws IOException When I/O operation fails.
* @throws NullPointerException When {@code source} is null.
* @throws NullPointerException When {@code destination} is null.
*/
public static void transfer(ReadableByteChannel source, WritableByteChannel destination) throws IOException {
Objects.requireNonNull(source, "'source' must not be null");
Objects.requireNonNull(source, "'destination' must not be null");
ByteBuffer buffer = ByteBuffer.allocate(DEFAULT_BUFFER_SIZE);
int read;
do {
buffer.clear();
read = source.read(buffer);
buffer.flip();
while (buffer.hasRemaining()) {
destination.write(buffer);
}
} while (read >= 0);
}
/**
* Transfers bytes from {@link ReadableByteChannel} to {@link AsynchronousByteChannel}.
* @param source A source {@link ReadableByteChannel}.
* @param destination A destination {@link AsynchronousByteChannel}.
* @return A {@link Mono} that completes when transfer is finished.
* @throws NullPointerException When {@code source} is null.
* @throws NullPointerException When {@code destination} is null.
*/
public static Mono<Void> transferAsync(ReadableByteChannel source, AsynchronousByteChannel destination) {
Objects.requireNonNull(source, "'source' must not be null");
Objects.requireNonNull(source, "'destination' must not be null");
return Mono.create(sink -> sink.onRequest(value -> {
ByteBuffer buffer = ByteBuffer.allocate(DEFAULT_BUFFER_SIZE);
try {
transferAsynchronously(source, destination, buffer, sink);
} catch (IOException e) {
sink.error(e);
}
}));
}
private static void transferAsynchronously(
ReadableByteChannel source, AsynchronousByteChannel destination,
ByteBuffer buffer, MonoSink<Void> sink) throws IOException {
buffer.clear();
int read = source.read(buffer);
if (read >= 0) {
buffer.flip();
destination.write(buffer, buffer, new CompletionHandler<Integer, ByteBuffer>() {
@Override
@Override
public void failed(Throwable e, ByteBuffer attachment) {
sink.error(e);
}
});
} else {
sink.success();
}
}
} |
Yes. It unwraps the exception and calls `Subscriber.onError`. The effect is the same as `Mono.error`. | public static Mono<Void> writeToWritableByteChannel(Flux<ByteBuffer> content, WritableByteChannel channel) {
if (content == null && channel == null) {
return monoError(LOGGER, new NullPointerException("'content' and 'channel' cannot be null."));
} else if (content == null) {
return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
} else if (channel == null) {
return monoError(LOGGER, new NullPointerException("'channel' cannot be null."));
}
return content.publishOn(Schedulers.boundedElastic())
.map(buffer -> {
while (buffer.hasRemaining()) {
try {
channel.write(buffer);
} catch (IOException e) {
throw Exceptions.propagate(e);
}
}
return buffer;
}).then();
} | throw Exceptions.propagate(e); | public static Mono<Void> writeToWritableByteChannel(Flux<ByteBuffer> content, WritableByteChannel channel) {
if (content == null && channel == null) {
return monoError(LOGGER, new NullPointerException("'content' and 'channel' cannot be null."));
} else if (content == null) {
return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
} else if (channel == null) {
return monoError(LOGGER, new NullPointerException("'channel' cannot be null."));
}
return content.publishOn(Schedulers.boundedElastic())
.map(buffer -> {
while (buffer.hasRemaining()) {
try {
channel.write(buffer);
} catch (IOException e) {
throw Exceptions.propagate(e);
}
}
return buffer;
}).then();
} | class FluxUtil {
private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
private static final ClientLogger LOGGER = new ClientLogger(FluxUtil.class);
/**
* Checks if a type is Flux<ByteBuffer>.
*
* @param entityType the type to check
* @return whether the type represents a Flux that emits ByteBuffer
*/
public static boolean isFluxByteBuffer(Type entityType) {
    // Must be Flux (or a subtype) whose first type argument is ByteBuffer (or a subtype).
    if (!TypeUtil.isTypeOrSubTypeOf(entityType, Flux.class)) {
        return false;
    }
    Type elementType = TypeUtil.getTypeArguments(entityType)[0];
    return TypeUtil.isTypeOrSubTypeOf(elementType, ByteBuffer.class);
}
/**
* Collects ByteBuffers emitted by a Flux into a byte array.
*
* @param stream A stream which emits ByteBuffer instances.
* @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux.
* @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link
* Integer
*/
public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream) {
    // A mutable ByteBufferCollector accumulates each emitted buffer;
    // toByteArray snapshots the concatenated bytes once the stream completes.
    return stream.collect(ByteBufferCollector::new, ByteBufferCollector::write)
        .map(ByteBufferCollector::toByteArray);
}
/**
* Collects ByteBuffers emitted by a Flux into a byte array.
* <p>
* Unlike {@link
* This size hint allows for optimizations when creating the initial buffer to reduce the number of times it needs
* to be resized while concatenating emitted ByteBuffers.
*
* @param stream A stream which emits ByteBuffer instances.
* @param sizeHint A hint about the expected stream size.
* @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux.
* @throws IllegalArgumentException If {@code sizeHint} is equal to or less than {@code 0}.
* @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link
* Integer
*/
public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream, int sizeHint) {
    // sizeHint pre-sizes the collector's initial buffer to reduce resize copies.
    return stream.collect(() -> new ByteBufferCollector(sizeHint), ByteBufferCollector::write)
        .map(ByteBufferCollector::toByteArray);
}
/**
* Collects ByteBuffers returned in a network response into a byte array.
* <p>
* The {@code headers} are inspected for containing an {@code Content-Length} which determines if a size hinted
* collection, {@link
*
*
* @param stream A network response ByteBuffer stream.
* @param headers The HTTP headers of the response.
* @return A Mono which emits the collected network response ByteBuffers.
* @throws NullPointerException If {@code headers} is null.
* @throws IllegalStateException If the size of the network response is greater than {@link Integer
*/
public static Mono<byte[]> collectBytesFromNetworkResponse(Flux<ByteBuffer> stream, HttpHeaders headers) {
    Objects.requireNonNull(headers, "'headers' cannot be null.");
    String contentLengthHeader = headers.getValue("Content-Length");
    if (contentLengthHeader == null) {
        // No length hint available; collect with a growable buffer.
        return FluxUtil.collectBytesInByteBufferStream(stream);
    }
    int contentLength;
    try {
        contentLength = Integer.parseInt(contentLengthHeader);
    } catch (NumberFormatException ex) {
        // Unparseable header value; fall back to un-hinted collection.
        return FluxUtil.collectBytesInByteBufferStream(stream);
    }
    return (contentLength > 0)
        ? FluxUtil.collectBytesInByteBufferStream(stream, contentLength)
        : Mono.just(EMPTY_BYTE_ARRAY);
}
/**
* Gets the content of the provided ByteBuffer as a byte array. This method will create a new byte array even if the
* ByteBuffer can have optionally backing array.
*
* @param byteBuffer the byte buffer
* @return the byte array
*/
public static byte[] byteBufferToArray(ByteBuffer byteBuffer) {
    // Copies only the remaining (position..limit) bytes; the result never
    // aliases an optional backing array of the buffer.
    byte[] copy = new byte[byteBuffer.remaining()];
    byteBuffer.get(copy);
    return copy;
}
/**
* Creates a {@link Flux} that is capable of resuming a download by applying retry logic when an error occurs.
*
* @param downloadSupplier Supplier of the initial download.
* @param onDownloadErrorResume {@link BiFunction} of {@link Throwable} and {@link Long} which is used to resume
* downloading when an error occurs.
* @param maxRetries The maximum number of times a download can be resumed when an error occurs.
* @return A {@link Flux} that downloads reliably.
*/
public static Flux<ByteBuffer> createRetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier,
    BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries) {
    // Convenience overload: start the download at offset 0.
    return createRetriableDownloadFlux(downloadSupplier, onDownloadErrorResume, maxRetries, 0L);
}
/**
* Creates a {@link Flux} that is capable of resuming a download by applying retry logic when an error occurs.
*
* @param downloadSupplier Supplier of the initial download.
* @param onDownloadErrorResume {@link BiFunction} of {@link Throwable} and {@link Long} which is used to resume
* downloading when an error occurs.
* @param maxRetries The maximum number of times a download can be resumed when an error occurs.
* @param position The initial offset for the download.
* @return A {@link Flux} that downloads reliably.
*/
public static Flux<ByteBuffer> createRetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier,
    BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries, long position) {
    // RetriableDownloadFlux invokes onDownloadErrorResume on failure, up to maxRetries times.
    return new RetriableDownloadFlux(downloadSupplier, onDownloadErrorResume, maxRetries, position);
}
/**
* Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer} using a chunk size of 4096.
* <p>
* Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered
* non-replayable as well.
* <p>
* If the passed {@link InputStream} is {@code null} {@link Flux
*
* @param inputStream The {@link InputStream} to convert into a {@link Flux}.
* @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream.
*/
public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream) {
    // Delegates with the documented default chunk size of 4096 bytes.
    return toFluxByteBuffer(inputStream, 4096);
}
/**
* Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer}.
* <p>
* Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered
* non-replayable as well.
* <p>
* If the passed {@link InputStream} is {@code null} {@link Flux
*
* @param inputStream The {@link InputStream} to convert into a {@link Flux}.
* @param chunkSize The requested size for each {@link ByteBuffer}.
* @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream.
* @throws IllegalArgumentException If {@code chunkSize} is less than or equal to {@code 0}.
*/
public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream, int chunkSize) {
    if (chunkSize <= 0) {
        return Flux.error(new IllegalArgumentException("'chunkSize' must be greater than 0."));
    }
    if (inputStream == null) {
        // Documented behavior: a null stream yields an empty Flux rather than an error.
        return Flux.empty();
    }
    if (inputStream instanceof FileInputStream) {
        // Fast path for files: emit read-only memory-mapped regions of the channel
        // instead of copying through an intermediate byte[].
        FileChannel fileChannel = ((FileInputStream) inputStream).getChannel();
        return Flux.<ByteBuffer, FileChannel>generate(() -> fileChannel, (channel, sink) -> {
            try {
                long channelPosition = channel.position();
                long channelSize = channel.size();
                if (channelPosition == channelSize) {
                    // Reached EOF: close the channel and complete the stream.
                    channel.close();
                    sink.complete();
                } else {
                    // Map at most chunkSize bytes, clamped to the remaining file length.
                    int nextByteBufferSize = (int) Math.min(chunkSize, channelSize - channelPosition);
                    sink.next(channel.map(FileChannel.MapMode.READ_ONLY, channelPosition, nextByteBufferSize));
                    channel.position(channelPosition + nextByteBufferSize);
                }
            } catch (IOException ex) {
                sink.error(ex);
            }
            return channel;
        });
    }
    // General path: InputStream.read may return fewer bytes than requested, so
    // accumulate into the chunk buffer until it is full or EOF is reached.
    return Flux.<ByteBuffer, InputStream>generate(() -> inputStream, (stream, sink) -> {
        byte[] buffer = new byte[chunkSize];
        try {
            int offset = 0;
            while (offset < chunkSize) {
                int readCount = inputStream.read(buffer, offset, chunkSize - offset);
                if (readCount == -1) {
                    // EOF: emit any partially filled chunk before completing.
                    if (offset > 0) {
                        sink.next(ByteBuffer.wrap(buffer, 0, offset));
                    }
                    sink.complete();
                    return stream;
                }
                offset += readCount;
            }
            sink.next(ByteBuffer.wrap(buffer));
        } catch (IOException ex) {
            sink.error(ex);
        }
        return stream;
    }).filter(ByteBuffer::hasRemaining);
}
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a
* single entity of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* <p><strong>Code samples</strong></p>
* <!-- src_embed com.azure.core.implementation.util.fluxutil.withcontext -->
* <pre>
* String prefix = "Hello, ";
* Mono<String> response = FluxUtil
* .withContext&
* </pre>
* <!-- end com.azure.core.implementation.util.fluxutil.withcontext -->
*
* @param serviceCall The lambda function that makes the service call into which azure context will be passed
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall) {
    // Convenience overload with no extra attributes to merge into the context.
    return withContext(serviceCall, Collections.emptyMap());
}
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context}, adds the specified context attributes and calls the given lambda
* function with this context and returns a single entity of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* @param serviceCall serviceCall The lambda function that makes the service call into which azure context will be
* passed
* @param contextAttributes The map of attributes sent by the calling method to be set on {@link Context}.
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall,
    Map<String, String> contextAttributes) {
    // Merge the caller-supplied attributes and the Reactor subscriber context into
    // one azure-core Context, then invoke the service call with it.
    return Mono.deferContextual(reactorContext -> {
        // Single-element array so the lambdas below can rebind the accumulated context.
        final Context[] accumulated = new Context[]{Context.NONE};
        if (!CoreUtils.isNullOrEmpty(contextAttributes)) {
            contextAttributes.forEach(
                (key, value) -> accumulated[0] = accumulated[0].addData(key, value));
        }
        if (!reactorContext.isEmpty()) {
            reactorContext.stream().forEach(
                entry -> accumulated[0] = accumulated[0].addData(entry.getKey(), entry.getValue()));
        }
        return serviceCall.apply(accumulated[0]);
    });
}
/**
* Converts the incoming content to Mono.
*
* @param <T> The type of the Response, which will be returned in the Mono.
* @param response whose {@link Response
* @return The converted {@link Mono}
*/
public static <T> Mono<T> toMono(Response<T> response) {
    // Completes empty when the response carries a null value.
    return Mono.justOrEmpty(response.getValue());
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Mono}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Mono} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> Mono<T> monoError(ClientLogger logger, RuntimeException ex) {
    // Log first, then surface the logged exception through the error channel.
    return Mono.error(logger.logExceptionAsError(Exceptions.propagate(ex)));
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Mono}.
*
* @param logBuilder The {@link LoggingEventBuilder} with context to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Mono} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> Mono<T> monoError(LoggingEventBuilder logBuilder, RuntimeException ex) {
    // Same shape as monoError(ClientLogger, ...) but logs via a builder that
    // already carries logging context.
    return Mono.error(logBuilder.log(Exceptions.propagate(ex)));
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Flux}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Flux} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> Flux<T> fluxError(ClientLogger logger, RuntimeException ex) {
    // Flux counterpart of monoError: log, then terminate with the exception.
    return Flux.error(logger.logExceptionAsError(Exceptions.propagate(ex)));
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link PagedFlux}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link PagedFlux} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> PagedFlux<T> pagedFluxError(ClientLogger logger, RuntimeException ex) {
    // The supplier defers creating (and logging) the failed Mono until a page is requested.
    return new PagedFlux<>(() -> monoError(logger, ex));
}
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a
* collection of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* <p><strong>Code samples</strong></p>
* <!-- src_embed com.azure.core.implementation.util.fluxutil.fluxcontext -->
* <pre>
* String prefix = "Hello, ";
* Flux<String> response = FluxUtil
* .fluxContext&
* </pre>
* <!-- end com.azure.core.implementation.util.fluxutil.fluxcontext -->
*
* @param serviceCall The lambda function that makes the service call into which the context will be passed
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
public static <T> Flux<T> fluxContext(Function<Context, Flux<T>> serviceCall) {
    // Defer so the subscriber's Reactor context is captured at subscription time.
    return Flux.deferContextual(context -> serviceCall.apply(toAzureContext(context)));
}
/**
* Converts a reactor context to azure context. If the reactor context is {@code null} or empty, {@link
* Context
*
* @param context The reactor context
* @return The azure context
*/
private static Context toAzureContext(ContextView context) {
    // Fold every Reactor context entry into an azure-core Context chain,
    // starting from Context.NONE (also the result for an empty input).
    final Context[] result = new Context[]{Context.NONE};
    if (!context.isEmpty()) {
        context.stream().forEach(
            entry -> result[0] = result[0].addData(entry.getKey(), entry.getValue()));
    }
    return result[0];
}
/**
* Converts an Azure context to Reactor context. If the Azure context is {@code null} or empty, {@link
* reactor.util.context.Context
*
* @param context The Azure context.
* @return The Reactor context.
*/
public static reactor.util.context.Context toReactorContext(Context context) {
    // Null input maps to the empty Reactor context.
    reactor.util.context.Context reactorContext = reactor.util.context.Context.empty();
    if (context == null) {
        return reactorContext;
    }
    // Walk the azure-core context chain, skipping links without a usable value.
    for (Context chained : context.getContextChain()) {
        if (chained != null && chained.getValue() != null) {
            reactorContext = reactorContext.put(chained.getKey(), chained.getValue());
        }
    }
    return reactorContext;
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* OutputStream}.
* <p>
* The {@code stream} is not closed by this call, closing of the {@code stream} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code stream} are null. Additionally, an
* error will be emitted if an exception occurs while writing the {@code content} to the {@code stream}.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param stream The {@link OutputStream} being written into.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* OutputStream}, or an error status if writing fails.
*/
public static Mono<Void> writeToOutputStream(Flux<ByteBuffer> content, OutputStream stream) {
    // Report the most specific null-argument combination, matching the other write helpers.
    if (content == null) {
        return (stream == null)
            ? monoError(LOGGER, new NullPointerException("'content' and 'stream' cannot be null."))
            : monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    }
    if (stream == null) {
        return monoError(LOGGER, new NullPointerException("'stream' cannot be null."));
    }
    // The subscriber performs the writes and signals completion/errors through the emitter.
    return Mono.create(emitter -> content.subscribe(new OutputStreamWriteSubscriber(emitter, stream, LOGGER)));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousFileChannel}.
* <p>
* The {@code outFile} is not closed by this call, closing of the {@code outFile} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code outFile} are null. Additionally, an
* error will be emitted if the {@code outFile} wasn't opened with the proper open options, such as {@link
* StandardOpenOption
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param outFile The {@link AsynchronousFileChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousFileChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code outFile} is null.
*/
public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile) {
    // Delegate to the positional overload, writing from the start of the file.
    return writeFile(content, outFile, 0);
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousFileChannel} starting at the given {@code position} in the file.
* <p>
* The {@code outFile} is not closed by this call, closing of the {@code outFile} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code outFile} are null or {@code position}
* is less than 0. Additionally, an error will be emitted if the {@code outFile} wasn't opened with the proper open
* options, such as {@link StandardOpenOption
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param outFile The {@link AsynchronousFileChannel}.
* @param position The position in the file to begin writing the {@code content}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousFileChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code outFile} is null.
* @throws IllegalArgumentException When {@code position} is negative.
*/
public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile, long position) {
    // Surface argument failures through the returned Mono rather than throwing synchronously.
    if (content == null && outFile == null) {
        return monoError(LOGGER, new NullPointerException("'content' and 'outFile' cannot be null."));
    }
    if (content == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    }
    if (outFile == null) {
        return monoError(LOGGER, new NullPointerException("'outFile' cannot be null."));
    }
    if (position < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'position' cannot be less than 0."));
    }
    // Adapt the file channel to a byte channel anchored at 'position' and reuse the channel writer.
    return writeToAsynchronousByteChannel(content, IOUtils.toAsynchronousByteChannel(outFile, position));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousByteChannel}.
* <p>
* The {@code channel} is not closed by this call, closing of the {@code channel} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code channel} are null.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param channel The {@link AsynchronousByteChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousByteChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code channel} is null.
*/
public static Mono<Void> writeToAsynchronousByteChannel(Flux<ByteBuffer> content, AsynchronousByteChannel channel) {
    // Validate up front; errors flow through the Mono's error channel.
    if (content == null && channel == null) {
        return monoError(LOGGER, new NullPointerException("'content' and 'channel' cannot be null."));
    }
    if (content == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    }
    if (channel == null) {
        return monoError(LOGGER, new NullPointerException("'channel' cannot be null."));
    }
    // The subscriber completes the sink once every buffer has been written to the channel.
    return Mono.create(sink ->
        content.subscribe(new AsynchronousByteChannelWriteSubscriber(channel, sink)));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* WritableByteChannel}.
* <p>
* The {@code channel} is not closed by this call, closing of the {@code channel} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code channel} are null.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param channel The {@link WritableByteChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* WritableByteChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code channel} is null.
*/
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file into chunks of the
* given size.
*
* @param fileChannel The file channel.
* @param chunkSize the size of file chunks to read.
* @param offset The offset in the file to begin reading.
* @param length The number of bytes to read from the file.
* @return the Flux.
*/
public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, int chunkSize, long offset,
    long length) {
    // Cold Flux: no reads are issued until a subscriber requests data.
    return new FileReadFlux(fileChannel, chunkSize, offset, length);
}
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file.
*
* @param fileChannel The file channel.
* @param offset The offset in the file to begin reading.
* @param length The number of bytes to read from the file.
* @return the Flux.
*/
public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, long offset, long length) {
    // Use the default 64 KiB chunk size.
    return readFile(fileChannel, DEFAULT_CHUNK_SIZE, offset, length);
}
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads the entire file.
*
* @param fileChannel The file channel.
* @return The AsyncInputStream.
*/
public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel) {
    try {
        // Sizing the channel is the only synchronous I/O here; reads themselves are lazy.
        long size = fileChannel.size();
        return readFile(fileChannel, DEFAULT_CHUNK_SIZE, 0, size);
    } catch (IOException e) {
        // UncheckedIOException is the idiomatic unchecked wrapper for I/O failures; it keeps the
        // IOException as the typed cause and remains a RuntimeException for existing callers.
        return Flux.error(new java.io.UncheckedIOException("Failed to read the file.", e));
    }
}
// Default read granularity: 64 KiB per emitted ByteBuffer.
private static final int DEFAULT_CHUNK_SIZE = 1024 * 64;

// Cold Flux that reads the byte range [offset, offset + length) of a file in chunkSize pieces.
// Reads are demand-driven: a new asynchronous read is only issued after the previous chunk has
// been emitted and outstanding request credit remains.
private static final class FileReadFlux extends Flux<ByteBuffer> {
    private final AsynchronousFileChannel fileChannel;
    private final int chunkSize;
    private final long offset;
    private final long length;

    FileReadFlux(AsynchronousFileChannel fileChannel, int chunkSize, long offset, long length) {
        this.fileChannel = fileChannel;
        this.chunkSize = chunkSize;
        this.offset = offset;
        this.length = length;
    }

    @Override
    public void subscribe(CoreSubscriber<? super ByteBuffer> actual) {
        // Each subscriber gets its own subscription, and therefore its own read position.
        FileReadSubscription subscription =
            new FileReadSubscription(actual, fileChannel, chunkSize, offset, length);
        actual.onSubscribe(subscription);
    }

    // Subscription implementing a work-in-progress (WIP) drain loop; it is also the
    // CompletionHandler that receives asynchronous read results from the channel.
    static final class FileReadSubscription implements Subscription, CompletionHandler<Integer, ByteBuffer> {
        // Sentinel meaning "first read not started yet"; drain() replaces it with 'offset'.
        private static final int NOT_SET = -1;
        private static final long serialVersionUID = -6831808726875304256L;

        private final Subscriber<? super ByteBuffer> subscriber;
        // Next absolute file position to read from; NOT_SET until the first drain() runs.
        private volatile long position;
        private final AsynchronousFileChannel fileChannel;
        private final int chunkSize;
        private final long offset;
        private final long length;
        // Set when the range is exhausted, EOF is hit, or a read failed.
        private volatile boolean done;
        // Failure cause, published before 'done' and read only after 'done' is observed.
        private Throwable error;
        // Single-slot handoff between the completion callback and the drain loop.
        private volatile ByteBuffer next;
        private volatile boolean cancelled;
        // WIP counter serializing drain() so only one thread emits at a time.
        volatile int wip;
        static final AtomicIntegerFieldUpdater<FileReadSubscription> WIP =
            AtomicIntegerFieldUpdater.newUpdater(FileReadSubscription.class, "wip");
        // Outstanding Reactive Streams demand.
        volatile long requested;
        static final AtomicLongFieldUpdater<FileReadSubscription> REQUESTED =
            AtomicLongFieldUpdater.newUpdater(FileReadSubscription.class, "requested");

        FileReadSubscription(Subscriber<? super ByteBuffer> subscriber, AsynchronousFileChannel fileChannel,
            int chunkSize, long offset, long length) {
            this.subscriber = subscriber;
            this.fileChannel = fileChannel;
            this.chunkSize = chunkSize;
            this.offset = offset;
            this.length = length;
            this.position = NOT_SET;
        }

        @Override
        public void request(long n) {
            if (Operators.validate(n)) {
                // addCap saturates at Long.MAX_VALUE instead of overflowing.
                Operators.addCap(REQUESTED, this, n);
                drain();
            }
        }

        @Override
        public void cancel() {
            this.cancelled = true;
        }

        // Invoked by the channel when an asynchronous read finishes.
        @Override
        public void completed(Integer bytesRead, ByteBuffer buffer) {
            if (!cancelled) {
                if (bytesRead == -1) {
                    // EOF before the requested range was exhausted.
                    done = true;
                } else {
                    // Trim the read to the remaining range so we never emit past offset + length.
                    long pos = position;
                    int bytesWanted = Math.min(bytesRead, maxRequired(pos));
                    long position2 = pos + bytesWanted;
                    position = position2;
                    buffer.position(bytesWanted);
                    buffer.flip();
                    // Publish the chunk for the drain loop to emit.
                    next = buffer;
                    if (position2 >= offset + length) {
                        done = true;
                    }
                }
                drain();
            }
        }

        @Override
        public void failed(Throwable exc, ByteBuffer attachment) {
            if (!cancelled) {
                // Record the error before setting 'done' so drain() observes both.
                error = exc;
                done = true;
                drain();
            }
        }

        // Serialized emission loop: only the thread that wins the WIP increment runs the body.
        private void drain() {
            if (WIP.getAndIncrement(this) != 0) {
                return;
            }
            // First entry: initialize the position and kick off the first read.
            if (position == NOT_SET) {
                position = offset;
                doRead();
            }
            int missed = 1;
            while (true) {
                if (cancelled) {
                    return;
                }
                if (REQUESTED.get(this) > 0) {
                    boolean emitted = false;
                    // Read 'done' before 'next' so a terminal signal is never missed.
                    boolean d = done;
                    ByteBuffer bb = next;
                    if (bb != null) {
                        next = null;
                        subscriber.onNext(bb);
                        emitted = true;
                    }
                    if (d) {
                        if (error != null) {
                            subscriber.onError(error);
                        } else {
                            subscriber.onComplete();
                        }
                        return;
                    }
                    if (emitted) {
                        // Consume one unit of demand, then schedule the next read.
                        Operators.produced(REQUESTED, this, 1);
                        doRead();
                    }
                }
                // Re-check if another signal arrived while we were draining.
                missed = WIP.addAndGet(this, -missed);
                if (missed == 0) {
                    return;
                }
            }
        }

        // Issue one asynchronous read of at most chunkSize bytes, capped at the range end.
        private void doRead() {
            long pos = position;
            ByteBuffer innerBuf = ByteBuffer.allocate(Math.min(chunkSize, maxRequired(pos)));
            fileChannel.read(innerBuf, pos, innerBuf, this);
        }

        // Remaining bytes in the requested range from 'pos'; clamped to [0, Integer.MAX_VALUE].
        private int maxRequired(long pos) {
            long maxRequired = offset + length - pos;
            if (maxRequired <= 0) {
                return 0;
            } else {
                int m = (int) (maxRequired);
                // The narrowing cast can go negative for ranges over 2 GiB; clamp instead.
                if (m < 0) {
                    return Integer.MAX_VALUE;
                } else {
                    return m;
                }
            }
        }
    }
}
// Private constructor: FluxUtil is a static utility class and must not be instantiated.
private FluxUtil() {
}
} | class FluxUtil {
// Shared empty array returned for zero-length network responses (avoids per-call allocation).
private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
// Logger used by the error-channel helpers in this class.
private static final ClientLogger LOGGER = new ClientLogger(FluxUtil.class);
/**
* Checks if a type is Flux<ByteBuffer>.
*
* @param entityType the type to check
* @return whether the type represents a Flux that emits ByteBuffer
*/
public static boolean isFluxByteBuffer(Type entityType) {
    // Only a Flux (or subtype) whose first type argument is ByteBuffer qualifies.
    if (!TypeUtil.isTypeOrSubTypeOf(entityType, Flux.class)) {
        return false;
    }
    Type elementType = TypeUtil.getTypeArguments(entityType)[0];
    return TypeUtil.isTypeOrSubTypeOf(elementType, ByteBuffer.class);
}
/**
* Collects ByteBuffers emitted by a Flux into a byte array.
*
* @param stream A stream which emits ByteBuffer instances.
* @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux.
* @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link
* Integer
*/
public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream) {
    // A mutable ByteBufferCollector accumulates every emitted buffer; toByteArray snapshots it.
    return stream.collect(ByteBufferCollector::new, ByteBufferCollector::write)
        .map(ByteBufferCollector::toByteArray);
}
/**
* Collects ByteBuffers emitted by a Flux into a byte array.
* <p>
* Unlike {@link
* This size hint allows for optimizations when creating the initial buffer to reduce the number of times it needs
* to be resized while concatenating emitted ByteBuffers.
*
* @param stream A stream which emits ByteBuffer instances.
* @param sizeHint A hint about the expected stream size.
* @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux.
* @throws IllegalArgumentException If {@code sizeHint} is equal to or less than {@code 0}.
* @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link
* Integer
*/
public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream, int sizeHint) {
    // sizeHint pre-sizes the collector's buffer, reducing resize-and-copy while collecting.
    return stream.collect(() -> new ByteBufferCollector(sizeHint), ByteBufferCollector::write)
        .map(ByteBufferCollector::toByteArray);
}
/**
* Collects ByteBuffers returned in a network response into a byte array.
* <p>
* The {@code headers} are inspected for containing an {@code Content-Length} which determines if a size hinted
* collection, {@link
*
*
* @param stream A network response ByteBuffer stream.
* @param headers The HTTP headers of the response.
* @return A Mono which emits the collected network response ByteBuffers.
* @throws NullPointerException If {@code headers} is null.
* @throws IllegalStateException If the size of the network response is greater than {@link Integer
*/
public static Mono<byte[]> collectBytesFromNetworkResponse(Flux<ByteBuffer> stream, HttpHeaders headers) {
    Objects.requireNonNull(headers, "'headers' cannot be null.");
    String contentLengthHeader = headers.getValue("Content-Length");
    if (contentLengthHeader != null) {
        try {
            int contentLength = Integer.parseInt(contentLengthHeader);
            // A non-positive Content-Length means an empty body; skip collection entirely.
            return (contentLength > 0)
                ? FluxUtil.collectBytesInByteBufferStream(stream, contentLength)
                : Mono.just(EMPTY_BYTE_ARRAY);
        } catch (NumberFormatException ignored) {
            // Malformed header: fall through to un-hinted collection below.
        }
    }
    // Missing or unparseable Content-Length: collect without a size hint.
    return FluxUtil.collectBytesInByteBufferStream(stream);
}
/**
* Gets the content of the provided ByteBuffer as a byte array. This method will create a new byte array even if the
* ByteBuffer can have optionally backing array.
*
* @param byteBuffer the byte buffer
* @return the byte array
*/
public static byte[] byteBufferToArray(ByteBuffer byteBuffer) {
    // Note: this consumes the buffer — its position advances to its limit.
    byte[] copy = new byte[byteBuffer.remaining()];
    byteBuffer.get(copy);
    return copy;
}
/**
* Creates a {@link Flux} that is capable of resuming a download by applying retry logic when an error occurs.
*
* @param downloadSupplier Supplier of the initial download.
* @param onDownloadErrorResume {@link BiFunction} of {@link Throwable} and {@link Long} which is used to resume
* downloading when an error occurs.
* @param maxRetries The maximum number of times a download can be resumed when an error occurs.
* @return A {@link Flux} that downloads reliably.
*/
public static Flux<ByteBuffer> createRetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier,
    BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries) {
    // Start the download at offset 0.
    return createRetriableDownloadFlux(downloadSupplier, onDownloadErrorResume, maxRetries, 0L);
}
/**
* Creates a {@link Flux} that is capable of resuming a download by applying retry logic when an error occurs.
*
* @param downloadSupplier Supplier of the initial download.
* @param onDownloadErrorResume {@link BiFunction} of {@link Throwable} and {@link Long} which is used to resume
* downloading when an error occurs.
* @param maxRetries The maximum number of times a download can be resumed when an error occurs.
* @param position The initial offset for the download.
* @return A {@link Flux} that downloads reliably.
*/
public static Flux<ByteBuffer> createRetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier,
    BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries, long position) {
    // On failure, RetriableDownloadFlux resumes via onDownloadErrorResume with the current offset,
    // up to maxRetries attempts.
    return new RetriableDownloadFlux(downloadSupplier, onDownloadErrorResume, maxRetries, position);
}
/**
* Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer} using a chunk size of 4096.
* <p>
* Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered
* non-replayable as well.
* <p>
* If the passed {@link InputStream} is {@code null} {@link Flux
*
* @param inputStream The {@link InputStream} to convert into a {@link Flux}.
* @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream.
*/
public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream) {
    // Default chunk size of 4096 bytes per emitted ByteBuffer.
    return toFluxByteBuffer(inputStream, 4096);
}
/**
* Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer}.
* <p>
* Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered
* non-replayable as well.
* <p>
* If the passed {@link InputStream} is {@code null} {@link Flux
*
* @param inputStream The {@link InputStream} to convert into a {@link Flux}.
* @param chunkSize The requested size for each {@link ByteBuffer}.
* @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream.
* @throws IllegalArgumentException If {@code chunkSize} is less than or equal to {@code 0}.
*/
public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream, int chunkSize) {
    if (chunkSize <= 0) {
        return Flux.error(new IllegalArgumentException("'chunkSize' must be greater than 0."));
    }
    if (inputStream == null) {
        return Flux.empty();
    }
    // Fast path: a FileInputStream exposes a FileChannel, letting chunks be memory-mapped
    // instead of copied through a heap buffer.
    if (inputStream instanceof FileInputStream) {
        FileChannel fileChannel = ((FileInputStream) inputStream).getChannel();
        return Flux.<ByteBuffer, FileChannel>generate(() -> fileChannel, (channel, sink) -> {
            try {
                long channelPosition = channel.position();
                long channelSize = channel.size();
                if (channelPosition == channelSize) {
                    // NOTE(review): the channel (and thus the stream) is closed on natural
                    // completion here, but not if the subscriber cancels early — confirm intended.
                    channel.close();
                    sink.complete();
                } else {
                    int nextByteBufferSize = (int) Math.min(chunkSize, channelSize - channelPosition);
                    // Emit a read-only mapped view; no bytes are copied onto the heap.
                    sink.next(channel.map(FileChannel.MapMode.READ_ONLY, channelPosition, nextByteBufferSize));
                    channel.position(channelPosition + nextByteBufferSize);
                }
            } catch (IOException ex) {
                sink.error(ex);
            }
            return channel;
        });
    }
    // General path: read up to chunkSize bytes per emission, looping because a single
    // InputStream.read may return fewer bytes than requested.
    return Flux.<ByteBuffer, InputStream>generate(() -> inputStream, (stream, sink) -> {
        byte[] buffer = new byte[chunkSize];
        try {
            int offset = 0;
            while (offset < chunkSize) {
                int readCount = inputStream.read(buffer, offset, chunkSize - offset);
                if (readCount == -1) {
                    // EOF: flush any partial chunk before completing.
                    if (offset > 0) {
                        sink.next(ByteBuffer.wrap(buffer, 0, offset));
                    }
                    sink.complete();
                    return stream;
                }
                offset += readCount;
            }
            sink.next(ByteBuffer.wrap(buffer));
        } catch (IOException ex) {
            sink.error(ex);
        }
        return stream;
    }).filter(ByteBuffer::hasRemaining);
}
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a
* single entity of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* <p><strong>Code samples</strong></p>
* <!-- src_embed com.azure.core.implementation.util.fluxutil.withcontext -->
* <pre>
* String prefix = "Hello, ";
* Mono<String> response = FluxUtil
* .withContext&
* </pre>
* <!-- end com.azure.core.implementation.util.fluxutil.withcontext -->
*
* @param serviceCall The lambda function that makes the service call into which azure context will be passed
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall) {
    // No caller-supplied attributes to merge; delegate with an empty attribute map.
    return withContext(serviceCall, Collections.emptyMap());
}
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context}, adds the specified context attributes and calls the given lambda
* function with this context and returns a single entity of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* @param serviceCall serviceCall The lambda function that makes the service call into which azure context will be
* passed
* @param contextAttributes The map of attributes sent by the calling method to be set on {@link Context}.
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall,
    Map<String, String> contextAttributes) {
    return Mono.deferContextual(reactorContext -> {
        // Single-element array works around the effectively-final requirement inside lambdas.
        final Context[] holder = new Context[]{Context.NONE};
        // Caller-supplied attributes are applied first...
        if (!CoreUtils.isNullOrEmpty(contextAttributes)) {
            contextAttributes.forEach((key, value) -> holder[0] = holder[0].addData(key, value));
        }
        // ...so entries from the subscriber's Reactor context win on key collisions.
        if (!reactorContext.isEmpty()) {
            reactorContext.stream()
                .forEach(entry -> holder[0] = holder[0].addData(entry.getKey(), entry.getValue()));
        }
        return serviceCall.apply(holder[0]);
    });
}
/**
* Converts the incoming content to Mono.
*
* @param <T> The type of the Response, which will be returned in the Mono.
* @param response whose {@link Response
* @return The converted {@link Mono}
*/
public static <T> Mono<T> toMono(Response<T> response) {
    // justOrEmpty treats a null value as an empty Mono rather than erroring.
    return Mono.justOrEmpty(response.getValue());
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Mono}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Mono} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> Mono<T> monoError(ClientLogger logger, RuntimeException ex) {
    // Log first, then surface the propagated exception through the error channel.
    RuntimeException loggedException = logger.logExceptionAsError(Exceptions.propagate(ex));
    return Mono.error(loggedException);
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Mono}.
*
* @param logBuilder The {@link LoggingEventBuilder} with context to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Mono} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> Mono<T> monoError(LoggingEventBuilder logBuilder, RuntimeException ex) {
    // Log with the builder's accumulated context, then emit the propagated exception.
    return Mono.error(logBuilder.log(Exceptions.propagate(ex)));
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link Flux}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link Flux} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> Flux<T> fluxError(ClientLogger logger, RuntimeException ex) {
    // Mirror of monoError for Flux: log, then terminate the Flux with the propagated exception.
    RuntimeException loggedException = logger.logExceptionAsError(Exceptions.propagate(ex));
    return Flux.error(loggedException);
}
/**
* Propagates a {@link RuntimeException} through the error channel of {@link PagedFlux}.
*
* @param logger The {@link ClientLogger} to log the exception.
* @param ex The {@link RuntimeException}.
* @param <T> The return type.
* @return A {@link PagedFlux} that terminates with error wrapping the {@link RuntimeException}.
*/
public static <T> PagedFlux<T> pagedFluxError(ClientLogger logger, RuntimeException ex) {
    // The first-page retriever itself errors, so any subscription terminates with 'ex'.
    return new PagedFlux<>(() -> monoError(logger, ex));
}
/**
* This method converts the incoming {@code deferContextual} from {@link reactor.util.context.Context Reactor
* Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a
* collection of type {@code T}
* <p>
* If the reactor context is empty, {@link Context
* </p>
*
* <p><strong>Code samples</strong></p>
* <!-- src_embed com.azure.core.implementation.util.fluxutil.fluxcontext -->
* <pre>
* String prefix = "Hello, ";
* Flux<String> response = FluxUtil
* .fluxContext&
* </pre>
* <!-- end com.azure.core.implementation.util.fluxutil.fluxcontext -->
*
* @param serviceCall The lambda function that makes the service call into which the context will be passed
* @param <T> The type of response returned from the service call
* @return The response from service call
*/
public static <T> Flux<T> fluxContext(Function<Context, Flux<T>> serviceCall) {
    // Capture the subscriber's Reactor context per subscription and hand it over as an Azure Context.
    return Flux.deferContextual(context -> serviceCall.apply(toAzureContext(context)));
}
/**
* Converts a reactor context to azure context. If the reactor context is {@code null} or empty, {@link
* Context
*
* @param context The reactor context
* @return The azure context
*/
private static Context toAzureContext(ContextView context) {
    // Holder array lets the forEach lambda rebind the accumulated (immutable) context.
    final Context[] result = new Context[]{Context.NONE};
    if (!context.isEmpty()) {
        context.stream()
            .forEach(entry -> result[0] = result[0].addData(entry.getKey(), entry.getValue()));
    }
    // Empty Reactor context maps to Context.NONE.
    return result[0];
}
/**
* Converts an Azure context to Reactor context. If the Azure context is {@code null} or empty, {@link
* reactor.util.context.Context
*
* @param context The Azure context.
* @return The Reactor context.
*/
public static reactor.util.context.Context toReactorContext(Context context) {
    reactor.util.context.Context reactorContext = reactor.util.context.Context.empty();
    // A null Azure context maps to the empty Reactor context.
    if (context == null) {
        return reactorContext;
    }
    // Walk the chain in order, skipping links without a usable value (Reactor rejects null values).
    for (Context link : context.getContextChain()) {
        if (link != null && link.getValue() != null) {
            reactorContext = reactorContext.put(link.getKey(), link.getValue());
        }
    }
    return reactorContext;
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* OutputStream}.
* <p>
* The {@code stream} is not closed by this call, closing of the {@code stream} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code stream} are null. Additionally, an
* error will be emitted if an exception occurs while writing the {@code content} to the {@code stream}.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param stream The {@link OutputStream} being written into.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* OutputStream}, or an error status if writing fails.
*/
public static Mono<Void> writeToOutputStream(Flux<ByteBuffer> content, OutputStream stream) {
    // Validate eagerly so subscribers see a descriptive error instead of an NPE mid-stream.
    if (content == null && stream == null) {
        return monoError(LOGGER, new NullPointerException("'content' and 'stream' cannot be null."));
    }
    if (content == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    }
    if (stream == null) {
        return monoError(LOGGER, new NullPointerException("'stream' cannot be null."));
    }
    // The subscriber signals the sink on completion or on the first write failure.
    return Mono.create(sink -> content.subscribe(new OutputStreamWriteSubscriber(sink, stream, LOGGER)));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousFileChannel}.
* <p>
* The {@code outFile} is not closed by this call, closing of the {@code outFile} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code outFile} are null. Additionally, an
* error will be emitted if the {@code outFile} wasn't opened with the proper open options, such as {@link
* StandardOpenOption
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param outFile The {@link AsynchronousFileChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousFileChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code outFile} is null.
*/
public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile) {
    // Delegate to the positional overload, writing from the start of the file.
    return writeFile(content, outFile, 0);
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousFileChannel} starting at the given {@code position} in the file.
* <p>
* The {@code outFile} is not closed by this call, closing of the {@code outFile} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code outFile} are null or {@code position}
* is less than 0. Additionally, an error will be emitted if the {@code outFile} wasn't opened with the proper open
* options, such as {@link StandardOpenOption
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param outFile The {@link AsynchronousFileChannel}.
* @param position The position in the file to begin writing the {@code content}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousFileChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code outFile} is null.
* @throws IllegalArgumentException When {@code position} is negative.
*/
public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile, long position) {
    // Surface argument failures through the returned Mono rather than throwing synchronously.
    if (content == null && outFile == null) {
        return monoError(LOGGER, new NullPointerException("'content' and 'outFile' cannot be null."));
    }
    if (content == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    }
    if (outFile == null) {
        return monoError(LOGGER, new NullPointerException("'outFile' cannot be null."));
    }
    if (position < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'position' cannot be less than 0."));
    }
    // Adapt the file channel to a byte channel anchored at 'position' and reuse the channel writer.
    return writeToAsynchronousByteChannel(content, IOUtils.toAsynchronousByteChannel(outFile, position));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* AsynchronousByteChannel}.
* <p>
* The {@code channel} is not closed by this call, closing of the {@code channel} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code channel} are null.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param channel The {@link AsynchronousByteChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* AsynchronousByteChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code channel} is null.
*/
public static Mono<Void> writeToAsynchronousByteChannel(Flux<ByteBuffer> content, AsynchronousByteChannel channel) {
    // Validate up front; errors flow through the Mono's error channel.
    if (content == null && channel == null) {
        return monoError(LOGGER, new NullPointerException("'content' and 'channel' cannot be null."));
    }
    if (content == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    }
    if (channel == null) {
        return monoError(LOGGER, new NullPointerException("'channel' cannot be null."));
    }
    // The subscriber completes the sink once every buffer has been written to the channel.
    return Mono.create(sink ->
        content.subscribe(new AsynchronousByteChannelWriteSubscriber(channel, sink)));
}
/**
* Writes the {@link ByteBuffer ByteBuffers} emitted by a {@link Flux} of {@link ByteBuffer} to an {@link
* WritableByteChannel}.
* <p>
* The {@code channel} is not closed by this call, closing of the {@code channel} is managed by the caller.
* <p>
* The response {@link Mono} will emit an error if {@code content} or {@code channel} are null.
*
* @param content The {@link Flux} of {@link ByteBuffer} content.
* @param channel The {@link WritableByteChannel}.
* @return A {@link Mono} which emits a completion status once the {@link Flux} has been written to the {@link
* WritableByteChannel}.
* @throws NullPointerException When {@code content} is null.
* @throws NullPointerException When {@code channel} is null.
*/
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file into chunks of the
* given size.
*
* @param fileChannel The file channel.
* @param chunkSize the size of file chunks to read.
* @param offset The offset in the file to begin reading.
* @param length The number of bytes to read from the file.
* @return the Flux.
*/
public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, int chunkSize, long offset,
    long length) {
    // Cold Flux: no reads are issued until a subscriber requests data.
    return new FileReadFlux(fileChannel, chunkSize, offset, length);
}
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file.
*
* @param fileChannel The file channel.
* @param offset The offset in the file to begin reading.
* @param length The number of bytes to read from the file.
* @return the Flux.
*/
public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, long offset, long length) {
    // Use the default 64 KiB chunk size.
    return readFile(fileChannel, DEFAULT_CHUNK_SIZE, offset, length);
}
/**
* Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads the entire file.
*
* @param fileChannel The file channel.
* @return The AsyncInputStream.
*/
public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel) {
    try {
        // Sizing the channel is the only synchronous I/O here; reads themselves are lazy.
        long size = fileChannel.size();
        return readFile(fileChannel, DEFAULT_CHUNK_SIZE, 0, size);
    } catch (IOException e) {
        // UncheckedIOException is the idiomatic unchecked wrapper for I/O failures; it keeps the
        // IOException as the typed cause and remains a RuntimeException for existing callers.
        return Flux.error(new java.io.UncheckedIOException("Failed to read the file.", e));
    }
}
// Default read granularity: 64 KiB per emitted ByteBuffer.
private static final int DEFAULT_CHUNK_SIZE = 1024 * 64;
/**
 * A cold {@link Flux} that reads a byte range of an {@link AsynchronousFileChannel} and emits it as a
 * sequence of {@link ByteBuffer} chunks, honoring Reactive Streams backpressure.
 */
private static final class FileReadFlux extends Flux<ByteBuffer> {
    private final AsynchronousFileChannel fileChannel;
    private final int chunkSize;
    private final long offset;
    private final long length;

    FileReadFlux(AsynchronousFileChannel fileChannel, int chunkSize, long offset, long length) {
        this.fileChannel = fileChannel;
        this.chunkSize = chunkSize;
        this.offset = offset;
        this.length = length;
    }

    @Override
    public void subscribe(CoreSubscriber<? super ByteBuffer> actual) {
        FileReadSubscription subscription =
            new FileReadSubscription(actual, fileChannel, chunkSize, offset, length);
        actual.onSubscribe(subscription);
    }

    /**
     * Subscription that issues one asynchronous read at a time and serializes emission through a
     * "work in progress" (WIP) drain loop, so {@link #request(long)}, {@link #completed(Integer, ByteBuffer)}
     * and {@link #failed(Throwable, ByteBuffer)} may race safely from different threads.
     */
    static final class FileReadSubscription implements Subscription, CompletionHandler<Integer, ByteBuffer> {
        /** Sentinel for {@link #position} meaning the first read has not been scheduled yet. */
        private static final int NOT_SET = -1;
        // NOTE(review): the original declared a serialVersionUID here, but this class is not
        // Serializable, so the dead field has been removed.

        private final Subscriber<? super ByteBuffer> subscriber;
        // Next absolute file position to read from; NOT_SET until the first drain pass.
        private volatile long position;
        private final AsynchronousFileChannel fileChannel;
        private final int chunkSize;
        private final long offset;
        private final long length;
        // Set once the requested range is exhausted, EOF is reached, or an error occurred.
        private volatile boolean done;
        // Terminal error to deliver after draining; written (at most once) before 'done' is set.
        private Throwable error;
        // Buffer produced by the last completed read, waiting for demand to be emitted.
        private volatile ByteBuffer next;
        private volatile boolean cancelled;

        // WIP counter guarding the drain loop: only the thread that increments 0 -> 1 drains.
        volatile int wip;
        static final AtomicIntegerFieldUpdater<FileReadSubscription> WIP =
            AtomicIntegerFieldUpdater.newUpdater(FileReadSubscription.class, "wip");

        // Outstanding demand from the downstream subscriber.
        volatile long requested;
        static final AtomicLongFieldUpdater<FileReadSubscription> REQUESTED =
            AtomicLongFieldUpdater.newUpdater(FileReadSubscription.class, "requested");

        FileReadSubscription(Subscriber<? super ByteBuffer> subscriber, AsynchronousFileChannel fileChannel,
            int chunkSize, long offset, long length) {
            this.subscriber = subscriber;
            this.fileChannel = fileChannel;
            this.chunkSize = chunkSize;
            this.offset = offset;
            this.length = length;
            this.position = NOT_SET;
        }

        @Override
        public void request(long n) {
            if (Operators.validate(n)) {
                // Accumulate demand with overflow capped at Long.MAX_VALUE, then try to emit.
                Operators.addCap(REQUESTED, this, n);
                drain();
            }
        }

        @Override
        public void cancel() {
            this.cancelled = true;
        }

        @Override
        public void completed(Integer bytesRead, ByteBuffer buffer) {
            if (!cancelled) {
                if (bytesRead == -1) {
                    // EOF before the requested range was satisfied.
                    done = true;
                } else {
                    // Clamp to the range still required so we never emit past offset + length.
                    long pos = position;
                    int bytesWanted = Math.min(bytesRead, maxRequired(pos));
                    long position2 = pos + bytesWanted;
                    position = position2;
                    buffer.position(bytesWanted);
                    buffer.flip();
                    next = buffer;
                    if (position2 >= offset + length) {
                        done = true;
                    }
                }
                drain();
            }
        }

        @Override
        public void failed(Throwable exc, ByteBuffer attachment) {
            if (!cancelled) {
                error = exc;
                done = true;
                drain();
            }
        }

        /**
         * Serialized emission loop. Lazily schedules the first read, emits at most one pending buffer
         * per pass when there is demand, and terminates with onError/onComplete once 'done' is observed.
         */
        private void drain() {
            if (WIP.getAndIncrement(this) != 0) {
                // Another thread is draining; it will observe our increment and loop again.
                return;
            }
            // First drain: position the cursor and kick off the initial read.
            if (position == NOT_SET) {
                position = offset;
                doRead();
            }
            int missed = 1;
            while (true) {
                if (cancelled) {
                    return;
                }
                if (REQUESTED.get(this) > 0) {
                    boolean emitted = false;
                    boolean d = done;
                    ByteBuffer bb = next;
                    if (bb != null) {
                        next = null;
                        subscriber.onNext(bb);
                        emitted = true;
                    }
                    if (d) {
                        if (error != null) {
                            subscriber.onError(error);
                        } else {
                            subscriber.onComplete();
                        }
                        return;
                    }
                    if (emitted) {
                        // Consume one unit of demand and schedule the next chunk.
                        Operators.produced(REQUESTED, this, 1);
                        doRead();
                    }
                }
                missed = WIP.addAndGet(this, -missed);
                if (missed == 0) {
                    return;
                }
            }
        }

        /** Schedules an asynchronous read of the next chunk at the current position. */
        private void doRead() {
            long pos = position;
            ByteBuffer innerBuf = ByteBuffer.allocate(Math.min(chunkSize, maxRequired(pos)));
            fileChannel.read(innerBuf, pos, innerBuf, this);
        }

        /**
         * Returns the number of bytes still required from {@code pos} to cover the requested range,
         * saturated at {@code Integer.MAX_VALUE}.
         *
         * @param pos absolute file position the next read starts at.
         * @return remaining bytes, capped at {@code Integer.MAX_VALUE}; {@code 0} when the range is done.
         */
        private int maxRequired(long pos) {
            long maxRequired = offset + length - pos;
            if (maxRequired <= 0) {
                return 0;
            }
            // Saturate instead of casting: the previous '(int) maxRequired' with an 'm < 0' guard let
            // remainders whose low 32 bits are small (e.g. exactly 2^32) wrap to a tiny non-negative
            // int, causing 0-byte buffer allocations and a stalled stream for very large ranges.
            return (int) Math.min(maxRequired, Integer.MAX_VALUE);
        }
    }
}
// Private constructor: this is a static utility class and must not be instantiated.
private FluxUtil() {
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.