comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
```suggestion reason, serviceBusException.getMessage()); ``` | public static void main(String[] args) throws InterruptedException {
Consumer<ServiceBusReceivedMessageContext> messageProcessor = context -> {
ServiceBusReceivedMessage message = context.getMessage();
System.out.println("Received message " + message.getBody().toString());
};
final CountDownLatch countdownLatch = new CountDownLatch(1);
Consumer<ServiceBusErrorContext> errorHandler = errorContext -> {
if (errorContext.getException() instanceof ServiceBusException) {
final ServiceBusException serviceBusException = (ServiceBusException) errorContext.getException();
final ServiceBusFailureReason reason = serviceBusException.getReason();
if (reason == ServiceBusFailureReason.MESSAGING_ENTITY_DISABLED
|| reason == ServiceBusFailureReason.MESSAGING_ENTITY_NOT_FOUND
|| reason == ServiceBusFailureReason.UNAUTHORIZED) {
System.out.printf("An unrecoverable error occurred. Stopping processing with reason %s: %s\n",
reason, serviceBusException.toString());
countdownLatch.countDown();
} else if (reason == ServiceBusFailureReason.MESSAGE_LOCK_LOST) {
System.out.printf("Message lock lost for message: %s", errorContext.getException().toString());
} else if (reason == ServiceBusFailureReason.SERVICE_BUSY) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
} else {
System.out.printf("Error source %s, reason %s, message: %s\n", serviceBusException.getErrorSource(),
reason, errorContext.getException().toString());
}
} else {
System.out.printf("Exception: %s\n", errorContext.getException().toString());
}
};
final ServiceBusProcessorClient processorClient = new ServiceBusClientBuilder()
.connectionString("<< connection-string >>")
.processor()
.queueName("<< queue name >>")
.processMessage(messageProcessor)
.processError(errorHandler)
.buildProcessorClient();
System.out.println("Starting the processor");
processorClient.start();
System.out.println("Listening for 10 seconds...");
if (countdownLatch.await(10, TimeUnit.SECONDS)) {
System.out.println("Closing processor due to fatal error");
} else {
System.out.println("Closing processor");
}
processorClient.close();
} | reason, serviceBusException.toString()); | public static void main(String[] args) throws InterruptedException {
Consumer<ServiceBusReceivedMessageContext> messageProcessor = context -> {
ServiceBusReceivedMessage message = context.getMessage();
System.out.println("Received message " + message.getBody().toString());
};
final CountDownLatch countdownLatch = new CountDownLatch(1);
Consumer<ServiceBusErrorContext> errorHandler = errorContext -> {
if (errorContext.getException() instanceof ServiceBusException) {
final ServiceBusException serviceBusException = (ServiceBusException) errorContext.getException();
final ServiceBusFailureReason reason = serviceBusException.getReason();
if (reason == ServiceBusFailureReason.MESSAGING_ENTITY_DISABLED
|| reason == ServiceBusFailureReason.MESSAGING_ENTITY_NOT_FOUND
|| reason == ServiceBusFailureReason.UNAUTHORIZED) {
System.out.printf("An unrecoverable error occurred. Stopping processing with reason %s: %s\n",
reason, serviceBusException.getMessage());
countdownLatch.countDown();
} else if (reason == ServiceBusFailureReason.MESSAGE_LOCK_LOST) {
System.out.printf("Message lock lost for message: %s", errorContext.getException().toString());
} else if (reason == ServiceBusFailureReason.SERVICE_BUSY) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
} else {
System.out.printf("Error source %s, reason %s, message: %s\n", serviceBusException.getErrorSource(),
reason, errorContext.getException().getMessage());
}
} else {
System.out.printf("Exception: %s\n", errorContext.getException().toString());
}
};
final ServiceBusProcessorClient processorClient = new ServiceBusClientBuilder()
.connectionString("<< connection-string >>")
.processor()
.queueName("<< queue name >>")
.processMessage(messageProcessor)
.processError(errorHandler)
.buildProcessorClient();
System.out.println("Starting the processor");
processorClient.start();
System.out.println("Listening for 10 seconds...");
if (countdownLatch.await(10, TimeUnit.SECONDS)) {
System.out.println("Closing processor due to fatal error");
} else {
System.out.println("Closing processor");
}
processorClient.close();
} | class ServiceBusProcessorSample {
/**
* Main method to start the sample application.
* @param args Ignored args.
* @throws InterruptedException If the application is interrupted.
*/
} | class ServiceBusProcessorSample {
/**
* Main method to start the sample application.
* @param args Ignored args.
* @throws InterruptedException If the application is interrupted.
*/
} |
I understand that this override is done for a long time , Why is it done in first place ? If `amqpMessage.getTtl()` is positive , this override is needed ? | public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage, byte[] deliveryTag) {
Message brokeredMessage;
Section body = amqpMessage.getBody();
if (body != null) {
if (body instanceof Data) {
Binary messageData = ((Data) body).getValue();
brokeredMessage = new Message(Utils.fromBinay(messageData.getArray()));
} else if (body instanceof AmqpValue) {
Object messageData = ((AmqpValue) body).getValue();
brokeredMessage = new Message(MessageBody.fromValueData(messageData));
} else if (body instanceof AmqpSequence) {
List<Object> messageData = ((AmqpSequence) body).getValue();
brokeredMessage = new Message(Utils.fromSequence(messageData));
} else {
brokeredMessage = new Message();
}
} else {
brokeredMessage = new Message();
}
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
if (applicationProperties != null) {
brokeredMessage.setProperties(applicationProperties.getValue());
}
brokeredMessage.setDeliveryCount(amqpMessage.getDeliveryCount() + 1);
long ttlMillis = amqpMessage.getTtl();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
if (amqpMessage.getCreationTime() != 0l && amqpMessage.getExpiryTime() != 0l) {
ttlMillis = amqpMessage.getExpiryTime() - amqpMessage.getCreationTime();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
}
Object messageId = amqpMessage.getMessageId();
if (messageId != null) {
brokeredMessage.setMessageId(messageId.toString());
}
brokeredMessage.setContentType(amqpMessage.getContentType());
Object correlationId = amqpMessage.getCorrelationId();
if (correlationId != null) {
brokeredMessage.setCorrelationId(correlationId.toString());
}
Properties properties = amqpMessage.getProperties();
if (properties != null) {
brokeredMessage.setTo(properties.getTo());
}
brokeredMessage.setLabel(amqpMessage.getSubject());
brokeredMessage.setReplyTo(amqpMessage.getReplyTo());
brokeredMessage.setReplyToSessionId(amqpMessage.getReplyToGroupId());
brokeredMessage.setSessionId(amqpMessage.getGroupId());
MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
if (messageAnnotations != null) {
Map<Symbol, Object> messageAnnotationsMap = messageAnnotations.getValue();
if (messageAnnotationsMap != null) {
for (Map.Entry<Symbol, Object> entry : messageAnnotationsMap.entrySet()) {
String entryName = entry.getKey().toString();
switch (entryName) {
case ClientConstants.ENQUEUEDTIMEUTCNAME:
brokeredMessage.setEnqueuedTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SCHEDULEDENQUEUETIMENAME:
brokeredMessage.setScheduledEnqueueTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SEQUENCENUBMERNAME:
brokeredMessage.setSequenceNumber((long) entry.getValue());
break;
case ClientConstants.LOCKEDUNTILNAME:
brokeredMessage.setLockedUntilUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.PARTITIONKEYNAME:
brokeredMessage.setPartitionKey((String) entry.getValue());
break;
case ClientConstants.VIAPARTITIONKEYNAME:
brokeredMessage.setViaPartitionKey((String) entry.getValue());
break;
case ClientConstants.DEADLETTERSOURCENAME:
brokeredMessage.setDeadLetterSource((String) entry.getValue());
break;
default:
break;
}
}
}
}
if (deliveryTag != null && deliveryTag.length == ClientConstants.LOCKTOKENSIZE) {
UUID lockToken = Util.convertDotNetBytesToUUID(deliveryTag);
brokeredMessage.setLockToken(lockToken);
} else {
brokeredMessage.setLockToken(ClientConstants.ZEROLOCKTOKEN);
}
brokeredMessage.setDeliveryTag(deliveryTag);
return brokeredMessage;
} | ttlMillis = amqpMessage.getExpiryTime() - amqpMessage.getCreationTime(); | public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage, byte[] deliveryTag) {
Message brokeredMessage;
Section body = amqpMessage.getBody();
if (body != null) {
if (body instanceof Data) {
Binary messageData = ((Data) body).getValue();
brokeredMessage = new Message(Utils.fromBinay(messageData.getArray()));
} else if (body instanceof AmqpValue) {
Object messageData = ((AmqpValue) body).getValue();
brokeredMessage = new Message(MessageBody.fromValueData(messageData));
} else if (body instanceof AmqpSequence) {
List<Object> messageData = ((AmqpSequence) body).getValue();
brokeredMessage = new Message(Utils.fromSequence(messageData));
} else {
brokeredMessage = new Message();
}
} else {
brokeredMessage = new Message();
}
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
if (applicationProperties != null) {
brokeredMessage.setProperties(applicationProperties.getValue());
}
brokeredMessage.setDeliveryCount(amqpMessage.getDeliveryCount() + 1);
long ttlMillis = amqpMessage.getTtl();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
if (amqpMessage.getCreationTime() != 0l && amqpMessage.getExpiryTime() != 0l) {
ttlMillis = amqpMessage.getExpiryTime() - amqpMessage.getCreationTime();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
}
Object messageId = amqpMessage.getMessageId();
if (messageId != null) {
brokeredMessage.setMessageId(messageId.toString());
}
brokeredMessage.setContentType(amqpMessage.getContentType());
Object correlationId = amqpMessage.getCorrelationId();
if (correlationId != null) {
brokeredMessage.setCorrelationId(correlationId.toString());
}
Properties properties = amqpMessage.getProperties();
if (properties != null) {
brokeredMessage.setTo(properties.getTo());
}
brokeredMessage.setLabel(amqpMessage.getSubject());
brokeredMessage.setReplyTo(amqpMessage.getReplyTo());
brokeredMessage.setReplyToSessionId(amqpMessage.getReplyToGroupId());
brokeredMessage.setSessionId(amqpMessage.getGroupId());
MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
if (messageAnnotations != null) {
Map<Symbol, Object> messageAnnotationsMap = messageAnnotations.getValue();
if (messageAnnotationsMap != null) {
for (Map.Entry<Symbol, Object> entry : messageAnnotationsMap.entrySet()) {
String entryName = entry.getKey().toString();
switch (entryName) {
case ClientConstants.ENQUEUEDTIMEUTCNAME:
brokeredMessage.setEnqueuedTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SCHEDULEDENQUEUETIMENAME:
brokeredMessage.setScheduledEnqueueTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SEQUENCENUBMERNAME:
brokeredMessage.setSequenceNumber((long) entry.getValue());
break;
case ClientConstants.LOCKEDUNTILNAME:
brokeredMessage.setLockedUntilUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.PARTITIONKEYNAME:
brokeredMessage.setPartitionKey((String) entry.getValue());
break;
case ClientConstants.VIAPARTITIONKEYNAME:
brokeredMessage.setViaPartitionKey((String) entry.getValue());
break;
case ClientConstants.DEADLETTERSOURCENAME:
brokeredMessage.setDeadLetterSource((String) entry.getValue());
break;
default:
break;
}
}
}
}
if (deliveryTag != null && deliveryTag.length == ClientConstants.LOCKTOKENSIZE) {
UUID lockToken = Util.convertDotNetBytesToUUID(deliveryTag);
brokeredMessage.setLockToken(lockToken);
} else {
brokeredMessage.setLockToken(ClientConstants.ZEROLOCKTOKEN);
}
brokeredMessage.setDeliveryTag(deliveryTag);
return brokeredMessage;
} | class MessageConverter {
public static org.apache.qpid.proton.message.Message convertBrokeredMessageToAmqpMessage(Message brokeredMessage) {
org.apache.qpid.proton.message.Message amqpMessage = Proton.message();
MessageBody body = brokeredMessage.getMessageBody();
if (body != null) {
if (body.getBodyType() == MessageBodyType.VALUE) {
amqpMessage.setBody(new AmqpValue(body.getValueData()));
} else if (body.getBodyType() == MessageBodyType.SEQUENCE) {
amqpMessage.setBody(new AmqpSequence(Utils.getSequenceFromMessageBody(body)));
} else {
amqpMessage.setBody(new Data(new Binary(Utils.getDataFromMessageBody(body))));
}
}
if (brokeredMessage.getProperties() != null) {
amqpMessage.setApplicationProperties(new ApplicationProperties(brokeredMessage.getProperties()));
}
if (brokeredMessage.getTimeToLive() != null) {
long ttlMillis = brokeredMessage.getTimeToLive().toMillis();
if (ttlMillis > ClientConstants.UNSIGNED_INT_MAX_VALUE) {
ttlMillis = ClientConstants.UNSIGNED_INT_MAX_VALUE;
}
amqpMessage.setTtl(ttlMillis);
Instant creationTime = Instant.now();
Instant absoluteExpiryTime = creationTime.plus(brokeredMessage.getTimeToLive());
amqpMessage.setCreationTime(creationTime.toEpochMilli());
amqpMessage.setExpiryTime(absoluteExpiryTime.toEpochMilli());
}
amqpMessage.setMessageId(brokeredMessage.getMessageId());
amqpMessage.setContentType(brokeredMessage.getContentType());
amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId());
amqpMessage.setSubject(brokeredMessage.getLabel());
amqpMessage.getProperties().setTo(brokeredMessage.getTo());
amqpMessage.setReplyTo(brokeredMessage.getReplyTo());
amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId());
amqpMessage.setGroupId(brokeredMessage.getSessionId());
Map<Symbol, Object> messageAnnotationsMap = new HashMap<>();
if (brokeredMessage.getScheduledEnqueueTimeUtc() != null) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.SCHEDULEDENQUEUETIMENAME), Date.from(brokeredMessage.getScheduledEnqueueTimeUtc()));
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.PARTITIONKEYNAME), brokeredMessage.getPartitionKey());
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getViaPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.VIAPARTITIONKEYNAME), brokeredMessage.getViaPartitionKey());
}
amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap));
return amqpMessage;
}
public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage) {
return convertAmqpMessageToBrokeredMessage(amqpMessage, null);
}
public static Message convertAmqpMessageToBrokeredMessage(MessageWithDeliveryTag amqpMessageWithDeliveryTag) {
org.apache.qpid.proton.message.Message amqpMessage = amqpMessageWithDeliveryTag.getMessage();
byte[] deliveryTag = amqpMessageWithDeliveryTag.getDeliveryTag();
return convertAmqpMessageToBrokeredMessage(amqpMessage, deliveryTag);
}
public static Message convertAmqpMessageToBrokeredMessage(MessageWithLockToken amqpMessageWithLockToken) {
Message convertedMessage = convertAmqpMessageToBrokeredMessage(amqpMessageWithLockToken.getMessage(), null);
convertedMessage.setLockToken(amqpMessageWithLockToken.getLockToken());
return convertedMessage;
}
} | class MessageConverter {
public static org.apache.qpid.proton.message.Message convertBrokeredMessageToAmqpMessage(Message brokeredMessage) {
org.apache.qpid.proton.message.Message amqpMessage = Proton.message();
MessageBody body = brokeredMessage.getMessageBody();
if (body != null) {
if (body.getBodyType() == MessageBodyType.VALUE) {
amqpMessage.setBody(new AmqpValue(body.getValueData()));
} else if (body.getBodyType() == MessageBodyType.SEQUENCE) {
amqpMessage.setBody(new AmqpSequence(Utils.getSequenceFromMessageBody(body)));
} else {
amqpMessage.setBody(new Data(new Binary(Utils.getDataFromMessageBody(body))));
}
}
if (brokeredMessage.getProperties() != null) {
amqpMessage.setApplicationProperties(new ApplicationProperties(brokeredMessage.getProperties()));
}
if (brokeredMessage.getTimeToLive() != null) {
long ttlMillis = brokeredMessage.getTimeToLive().toMillis();
if (ttlMillis > ClientConstants.UNSIGNED_INT_MAX_VALUE) {
ttlMillis = ClientConstants.UNSIGNED_INT_MAX_VALUE;
}
amqpMessage.setTtl(ttlMillis);
Instant creationTime = Instant.now();
Instant absoluteExpiryTime = creationTime.plus(brokeredMessage.getTimeToLive());
amqpMessage.setCreationTime(creationTime.toEpochMilli());
amqpMessage.setExpiryTime(absoluteExpiryTime.toEpochMilli());
}
amqpMessage.setMessageId(brokeredMessage.getMessageId());
amqpMessage.setContentType(brokeredMessage.getContentType());
amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId());
amqpMessage.setSubject(brokeredMessage.getLabel());
amqpMessage.getProperties().setTo(brokeredMessage.getTo());
amqpMessage.setReplyTo(brokeredMessage.getReplyTo());
amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId());
amqpMessage.setGroupId(brokeredMessage.getSessionId());
Map<Symbol, Object> messageAnnotationsMap = new HashMap<>();
if (brokeredMessage.getScheduledEnqueueTimeUtc() != null) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.SCHEDULEDENQUEUETIMENAME), Date.from(brokeredMessage.getScheduledEnqueueTimeUtc()));
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.PARTITIONKEYNAME), brokeredMessage.getPartitionKey());
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getViaPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.VIAPARTITIONKEYNAME), brokeredMessage.getViaPartitionKey());
}
amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap));
return amqpMessage;
}
public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage) {
return convertAmqpMessageToBrokeredMessage(amqpMessage, null);
}
public static Message convertAmqpMessageToBrokeredMessage(MessageWithDeliveryTag amqpMessageWithDeliveryTag) {
org.apache.qpid.proton.message.Message amqpMessage = amqpMessageWithDeliveryTag.getMessage();
byte[] deliveryTag = amqpMessageWithDeliveryTag.getDeliveryTag();
return convertAmqpMessageToBrokeredMessage(amqpMessage, deliveryTag);
}
public static Message convertAmqpMessageToBrokeredMessage(MessageWithLockToken amqpMessageWithLockToken) {
Message convertedMessage = convertAmqpMessageToBrokeredMessage(amqpMessageWithLockToken.getMessage(), null);
convertedMessage.setLockToken(amqpMessageWithLockToken.getLockToken());
return convertedMessage;
}
} |
Needed because ttl is milliseconds as integer, so it can't support time to live of more than 50 days. That's why we had to change it. | public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage, byte[] deliveryTag) {
Message brokeredMessage;
Section body = amqpMessage.getBody();
if (body != null) {
if (body instanceof Data) {
Binary messageData = ((Data) body).getValue();
brokeredMessage = new Message(Utils.fromBinay(messageData.getArray()));
} else if (body instanceof AmqpValue) {
Object messageData = ((AmqpValue) body).getValue();
brokeredMessage = new Message(MessageBody.fromValueData(messageData));
} else if (body instanceof AmqpSequence) {
List<Object> messageData = ((AmqpSequence) body).getValue();
brokeredMessage = new Message(Utils.fromSequence(messageData));
} else {
brokeredMessage = new Message();
}
} else {
brokeredMessage = new Message();
}
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
if (applicationProperties != null) {
brokeredMessage.setProperties(applicationProperties.getValue());
}
brokeredMessage.setDeliveryCount(amqpMessage.getDeliveryCount() + 1);
long ttlMillis = amqpMessage.getTtl();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
if (amqpMessage.getCreationTime() != 0l && amqpMessage.getExpiryTime() != 0l) {
ttlMillis = amqpMessage.getExpiryTime() - amqpMessage.getCreationTime();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
}
Object messageId = amqpMessage.getMessageId();
if (messageId != null) {
brokeredMessage.setMessageId(messageId.toString());
}
brokeredMessage.setContentType(amqpMessage.getContentType());
Object correlationId = amqpMessage.getCorrelationId();
if (correlationId != null) {
brokeredMessage.setCorrelationId(correlationId.toString());
}
Properties properties = amqpMessage.getProperties();
if (properties != null) {
brokeredMessage.setTo(properties.getTo());
}
brokeredMessage.setLabel(amqpMessage.getSubject());
brokeredMessage.setReplyTo(amqpMessage.getReplyTo());
brokeredMessage.setReplyToSessionId(amqpMessage.getReplyToGroupId());
brokeredMessage.setSessionId(amqpMessage.getGroupId());
MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
if (messageAnnotations != null) {
Map<Symbol, Object> messageAnnotationsMap = messageAnnotations.getValue();
if (messageAnnotationsMap != null) {
for (Map.Entry<Symbol, Object> entry : messageAnnotationsMap.entrySet()) {
String entryName = entry.getKey().toString();
switch (entryName) {
case ClientConstants.ENQUEUEDTIMEUTCNAME:
brokeredMessage.setEnqueuedTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SCHEDULEDENQUEUETIMENAME:
brokeredMessage.setScheduledEnqueueTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SEQUENCENUBMERNAME:
brokeredMessage.setSequenceNumber((long) entry.getValue());
break;
case ClientConstants.LOCKEDUNTILNAME:
brokeredMessage.setLockedUntilUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.PARTITIONKEYNAME:
brokeredMessage.setPartitionKey((String) entry.getValue());
break;
case ClientConstants.VIAPARTITIONKEYNAME:
brokeredMessage.setViaPartitionKey((String) entry.getValue());
break;
case ClientConstants.DEADLETTERSOURCENAME:
brokeredMessage.setDeadLetterSource((String) entry.getValue());
break;
default:
break;
}
}
}
}
if (deliveryTag != null && deliveryTag.length == ClientConstants.LOCKTOKENSIZE) {
UUID lockToken = Util.convertDotNetBytesToUUID(deliveryTag);
brokeredMessage.setLockToken(lockToken);
} else {
brokeredMessage.setLockToken(ClientConstants.ZEROLOCKTOKEN);
}
brokeredMessage.setDeliveryTag(deliveryTag);
return brokeredMessage;
} | ttlMillis = amqpMessage.getExpiryTime() - amqpMessage.getCreationTime(); | public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage, byte[] deliveryTag) {
Message brokeredMessage;
Section body = amqpMessage.getBody();
if (body != null) {
if (body instanceof Data) {
Binary messageData = ((Data) body).getValue();
brokeredMessage = new Message(Utils.fromBinay(messageData.getArray()));
} else if (body instanceof AmqpValue) {
Object messageData = ((AmqpValue) body).getValue();
brokeredMessage = new Message(MessageBody.fromValueData(messageData));
} else if (body instanceof AmqpSequence) {
List<Object> messageData = ((AmqpSequence) body).getValue();
brokeredMessage = new Message(Utils.fromSequence(messageData));
} else {
brokeredMessage = new Message();
}
} else {
brokeredMessage = new Message();
}
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
if (applicationProperties != null) {
brokeredMessage.setProperties(applicationProperties.getValue());
}
brokeredMessage.setDeliveryCount(amqpMessage.getDeliveryCount() + 1);
long ttlMillis = amqpMessage.getTtl();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
if (amqpMessage.getCreationTime() != 0l && amqpMessage.getExpiryTime() != 0l) {
ttlMillis = amqpMessage.getExpiryTime() - amqpMessage.getCreationTime();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
}
Object messageId = amqpMessage.getMessageId();
if (messageId != null) {
brokeredMessage.setMessageId(messageId.toString());
}
brokeredMessage.setContentType(amqpMessage.getContentType());
Object correlationId = amqpMessage.getCorrelationId();
if (correlationId != null) {
brokeredMessage.setCorrelationId(correlationId.toString());
}
Properties properties = amqpMessage.getProperties();
if (properties != null) {
brokeredMessage.setTo(properties.getTo());
}
brokeredMessage.setLabel(amqpMessage.getSubject());
brokeredMessage.setReplyTo(amqpMessage.getReplyTo());
brokeredMessage.setReplyToSessionId(amqpMessage.getReplyToGroupId());
brokeredMessage.setSessionId(amqpMessage.getGroupId());
MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
if (messageAnnotations != null) {
Map<Symbol, Object> messageAnnotationsMap = messageAnnotations.getValue();
if (messageAnnotationsMap != null) {
for (Map.Entry<Symbol, Object> entry : messageAnnotationsMap.entrySet()) {
String entryName = entry.getKey().toString();
switch (entryName) {
case ClientConstants.ENQUEUEDTIMEUTCNAME:
brokeredMessage.setEnqueuedTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SCHEDULEDENQUEUETIMENAME:
brokeredMessage.setScheduledEnqueueTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SEQUENCENUBMERNAME:
brokeredMessage.setSequenceNumber((long) entry.getValue());
break;
case ClientConstants.LOCKEDUNTILNAME:
brokeredMessage.setLockedUntilUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.PARTITIONKEYNAME:
brokeredMessage.setPartitionKey((String) entry.getValue());
break;
case ClientConstants.VIAPARTITIONKEYNAME:
brokeredMessage.setViaPartitionKey((String) entry.getValue());
break;
case ClientConstants.DEADLETTERSOURCENAME:
brokeredMessage.setDeadLetterSource((String) entry.getValue());
break;
default:
break;
}
}
}
}
if (deliveryTag != null && deliveryTag.length == ClientConstants.LOCKTOKENSIZE) {
UUID lockToken = Util.convertDotNetBytesToUUID(deliveryTag);
brokeredMessage.setLockToken(lockToken);
} else {
brokeredMessage.setLockToken(ClientConstants.ZEROLOCKTOKEN);
}
brokeredMessage.setDeliveryTag(deliveryTag);
return brokeredMessage;
} | class MessageConverter {
public static org.apache.qpid.proton.message.Message convertBrokeredMessageToAmqpMessage(Message brokeredMessage) {
org.apache.qpid.proton.message.Message amqpMessage = Proton.message();
MessageBody body = brokeredMessage.getMessageBody();
if (body != null) {
if (body.getBodyType() == MessageBodyType.VALUE) {
amqpMessage.setBody(new AmqpValue(body.getValueData()));
} else if (body.getBodyType() == MessageBodyType.SEQUENCE) {
amqpMessage.setBody(new AmqpSequence(Utils.getSequenceFromMessageBody(body)));
} else {
amqpMessage.setBody(new Data(new Binary(Utils.getDataFromMessageBody(body))));
}
}
if (brokeredMessage.getProperties() != null) {
amqpMessage.setApplicationProperties(new ApplicationProperties(brokeredMessage.getProperties()));
}
if (brokeredMessage.getTimeToLive() != null) {
long ttlMillis = brokeredMessage.getTimeToLive().toMillis();
if (ttlMillis > ClientConstants.UNSIGNED_INT_MAX_VALUE) {
ttlMillis = ClientConstants.UNSIGNED_INT_MAX_VALUE;
}
amqpMessage.setTtl(ttlMillis);
Instant creationTime = Instant.now();
Instant absoluteExpiryTime = creationTime.plus(brokeredMessage.getTimeToLive());
amqpMessage.setCreationTime(creationTime.toEpochMilli());
amqpMessage.setExpiryTime(absoluteExpiryTime.toEpochMilli());
}
amqpMessage.setMessageId(brokeredMessage.getMessageId());
amqpMessage.setContentType(brokeredMessage.getContentType());
amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId());
amqpMessage.setSubject(brokeredMessage.getLabel());
amqpMessage.getProperties().setTo(brokeredMessage.getTo());
amqpMessage.setReplyTo(brokeredMessage.getReplyTo());
amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId());
amqpMessage.setGroupId(brokeredMessage.getSessionId());
Map<Symbol, Object> messageAnnotationsMap = new HashMap<>();
if (brokeredMessage.getScheduledEnqueueTimeUtc() != null) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.SCHEDULEDENQUEUETIMENAME), Date.from(brokeredMessage.getScheduledEnqueueTimeUtc()));
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.PARTITIONKEYNAME), brokeredMessage.getPartitionKey());
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getViaPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.VIAPARTITIONKEYNAME), brokeredMessage.getViaPartitionKey());
}
amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap));
return amqpMessage;
}
public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage) {
return convertAmqpMessageToBrokeredMessage(amqpMessage, null);
}
public static Message convertAmqpMessageToBrokeredMessage(MessageWithDeliveryTag amqpMessageWithDeliveryTag) {
org.apache.qpid.proton.message.Message amqpMessage = amqpMessageWithDeliveryTag.getMessage();
byte[] deliveryTag = amqpMessageWithDeliveryTag.getDeliveryTag();
return convertAmqpMessageToBrokeredMessage(amqpMessage, deliveryTag);
}
public static Message convertAmqpMessageToBrokeredMessage(MessageWithLockToken amqpMessageWithLockToken) {
Message convertedMessage = convertAmqpMessageToBrokeredMessage(amqpMessageWithLockToken.getMessage(), null);
convertedMessage.setLockToken(amqpMessageWithLockToken.getLockToken());
return convertedMessage;
}
} | class MessageConverter {
public static org.apache.qpid.proton.message.Message convertBrokeredMessageToAmqpMessage(Message brokeredMessage) {
org.apache.qpid.proton.message.Message amqpMessage = Proton.message();
MessageBody body = brokeredMessage.getMessageBody();
if (body != null) {
if (body.getBodyType() == MessageBodyType.VALUE) {
amqpMessage.setBody(new AmqpValue(body.getValueData()));
} else if (body.getBodyType() == MessageBodyType.SEQUENCE) {
amqpMessage.setBody(new AmqpSequence(Utils.getSequenceFromMessageBody(body)));
} else {
amqpMessage.setBody(new Data(new Binary(Utils.getDataFromMessageBody(body))));
}
}
if (brokeredMessage.getProperties() != null) {
amqpMessage.setApplicationProperties(new ApplicationProperties(brokeredMessage.getProperties()));
}
if (brokeredMessage.getTimeToLive() != null) {
long ttlMillis = brokeredMessage.getTimeToLive().toMillis();
if (ttlMillis > ClientConstants.UNSIGNED_INT_MAX_VALUE) {
ttlMillis = ClientConstants.UNSIGNED_INT_MAX_VALUE;
}
amqpMessage.setTtl(ttlMillis);
Instant creationTime = Instant.now();
Instant absoluteExpiryTime = creationTime.plus(brokeredMessage.getTimeToLive());
amqpMessage.setCreationTime(creationTime.toEpochMilli());
amqpMessage.setExpiryTime(absoluteExpiryTime.toEpochMilli());
}
amqpMessage.setMessageId(brokeredMessage.getMessageId());
amqpMessage.setContentType(brokeredMessage.getContentType());
amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId());
amqpMessage.setSubject(brokeredMessage.getLabel());
amqpMessage.getProperties().setTo(brokeredMessage.getTo());
amqpMessage.setReplyTo(brokeredMessage.getReplyTo());
amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId());
amqpMessage.setGroupId(brokeredMessage.getSessionId());
Map<Symbol, Object> messageAnnotationsMap = new HashMap<>();
if (brokeredMessage.getScheduledEnqueueTimeUtc() != null) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.SCHEDULEDENQUEUETIMENAME), Date.from(brokeredMessage.getScheduledEnqueueTimeUtc()));
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.PARTITIONKEYNAME), brokeredMessage.getPartitionKey());
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getViaPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.VIAPARTITIONKEYNAME), brokeredMessage.getViaPartitionKey());
}
amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap));
return amqpMessage;
}
/**
 * Converts an AMQP message into its brokered {@link Message} representation.
 *
 * @param amqpMessage The proton-j AMQP message to convert.
 * @return The converted brokered message, with no delivery tag attached.
 */
public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage) {
    // No delivery tag is available in this overload; delegate with null.
    return convertAmqpMessageToBrokeredMessage(amqpMessage, null);
}
/**
 * Converts an AMQP message carrying a delivery tag into its brokered {@link Message} representation.
 *
 * @param amqpMessageWithDeliveryTag The AMQP message paired with its delivery tag.
 * @return The converted brokered message.
 */
public static Message convertAmqpMessageToBrokeredMessage(MessageWithDeliveryTag amqpMessageWithDeliveryTag) {
    // Unwrap both halves of the pair and delegate to the core conversion overload.
    return convertAmqpMessageToBrokeredMessage(
        amqpMessageWithDeliveryTag.getMessage(), amqpMessageWithDeliveryTag.getDeliveryTag());
}
/**
 * Converts an AMQP message carrying a lock token into its brokered {@link Message} representation.
 *
 * @param amqpMessageWithLockToken The AMQP message paired with its lock token.
 * @return The converted brokered message with the lock token applied.
 */
public static Message convertAmqpMessageToBrokeredMessage(MessageWithLockToken amqpMessageWithLockToken) {
    // Convert the payload first (no delivery tag), then stamp the lock token onto the result.
    Message brokeredMessage = convertAmqpMessageToBrokeredMessage(amqpMessageWithLockToken.getMessage(), null);
    brokeredMessage.setLockToken(amqpMessageWithLockToken.getLockToken());
    return brokeredMessage;
}
} |
Why is this check required? Can this method be called multiple times? If yes, what's the impact of trying to close the context each time? | private void readTimeoutRunnable(ChannelHandlerContext ctx) {
if ((timeoutMillis - (System.currentTimeMillis() - lastReadMillis)) > 0) {
return;
}
if (!closed) {
ctx.fireExceptionCaught(new TimeoutException(String.format(READ_TIMED_OUT_MESSAGE, timeoutMillis)));
ctx.close();
closed = true;
}
} | if (!closed) { | private void readTimeoutRunnable(ChannelHandlerContext ctx) {
if ((timeoutMillis - (System.currentTimeMillis() - lastReadMillis)) > 0) {
return;
}
if (!closed) {
ctx.fireExceptionCaught(new TimeoutException(String.format(READ_TIMED_OUT_MESSAGE, timeoutMillis)));
ctx.close();
closed = true;
}
} | class ReadTimeoutHandler extends ChannelInboundHandlerAdapter {
/**
* Name of the handler when it is added into a ChannelPipeline.
*/
public static final String HANDLER_NAME = "azureReadTimeoutHandler";
private static final String READ_TIMED_OUT_MESSAGE = "Channel read timed out after %d milliseconds.";
private final long timeoutMillis;
private boolean closed;
private long lastReadMillis;
private ScheduledFuture<?> readTimeoutWatcher;
/**
* Constructs a channel handler that watches channel read operations to ensure they aren't timing out.
*
* @param timeoutMillis The period of milliseconds when read progress has stopped before a channel is considered
* timed out.
*/
public ReadTimeoutHandler(long timeoutMillis) {
    // A non-positive timeout disables the watchdog entirely (see handlerAdded).
    this.timeoutMillis = timeoutMillis;
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
    // Record when the last read completed; the watchdog compares against this
    // timestamp to decide whether reads have stalled.
    this.lastReadMillis = System.currentTimeMillis();
    ctx.fireChannelReadComplete();
}
/**
 * Starts the periodic read-timeout watchdog when this handler is added to a pipeline.
 * A non-positive {@code timeoutMillis} disables the watchdog.
 *
 * @param ctx The handler context whose executor schedules the watchdog task.
 */
@Override
public void handlerAdded(ChannelHandlerContext ctx) {
    if (timeoutMillis > 0) {
        // Initialize the read timestamp to "now". Without this, lastReadMillis is 0 and the
        // first watchdog tick computes elapsed time since the Unix epoch rather than since the
        // handler was attached; correctness then silently depends on the scheduler's initial
        // delay equaling timeoutMillis.
        this.lastReadMillis = System.currentTimeMillis();
        this.readTimeoutWatcher = ctx.executor().scheduleAtFixedRate(() -> readTimeoutRunnable(ctx),
            timeoutMillis, timeoutMillis, TimeUnit.MILLISECONDS);
    }
}
/**
 * Cancels the outstanding watchdog task, if any, when this handler leaves the pipeline.
 *
 * @param ctx The handler context this handler is being removed from.
 */
@Override
public void handlerRemoved(ChannelHandlerContext ctx) {
    ScheduledFuture<?> watcher = this.readTimeoutWatcher;
    if (watcher == null || watcher.isDone()) {
        return;
    }
    // false: let an in-flight timeout check finish rather than interrupting it.
    watcher.cancel(false);
    this.readTimeoutWatcher = null;
}
} | class ReadTimeoutHandler extends ChannelInboundHandlerAdapter {
/**
* Name of the handler when it is added into a ChannelPipeline.
*/
public static final String HANDLER_NAME = "azureReadTimeoutHandler";
private static final String READ_TIMED_OUT_MESSAGE = "Channel read timed out after %d milliseconds.";
private final long timeoutMillis;
private boolean closed;
private long lastReadMillis;
private ScheduledFuture<?> readTimeoutWatcher;
/**
* Constructs a channel handler that watches channel read operations to ensure they aren't timing out.
*
* @param timeoutMillis The period of milliseconds when read progress has stopped before a channel is considered
* timed out.
*/
public ReadTimeoutHandler(long timeoutMillis) {
this.timeoutMillis = timeoutMillis;
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
this.lastReadMillis = System.currentTimeMillis();
ctx.fireChannelReadComplete();
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) {
if (timeoutMillis > 0) {
this.readTimeoutWatcher = ctx.executor().scheduleAtFixedRate(() -> readTimeoutRunnable(ctx),
timeoutMillis, timeoutMillis, TimeUnit.MILLISECONDS);
}
}
@Override
public void handlerRemoved(ChannelHandlerContext ctx) {
if (readTimeoutWatcher != null && !readTimeoutWatcher.isDone()) {
readTimeoutWatcher.cancel(false);
readTimeoutWatcher = null;
}
}
} |
Looked into the safety of calling close multiple times and the answer is unknown as this will trigger the close handler for other handlers, so it would be dependent on the configuration of the Netty pipeline. In our default instance it is safe to call multiple times but that isn't a guarantee. | private void readTimeoutRunnable(ChannelHandlerContext ctx) {
if ((timeoutMillis - (System.currentTimeMillis() - lastReadMillis)) > 0) {
return;
}
if (!closed) {
ctx.fireExceptionCaught(new TimeoutException(String.format(READ_TIMED_OUT_MESSAGE, timeoutMillis)));
ctx.close();
closed = true;
}
} | if (!closed) { | private void readTimeoutRunnable(ChannelHandlerContext ctx) {
if ((timeoutMillis - (System.currentTimeMillis() - lastReadMillis)) > 0) {
return;
}
if (!closed) {
ctx.fireExceptionCaught(new TimeoutException(String.format(READ_TIMED_OUT_MESSAGE, timeoutMillis)));
ctx.close();
closed = true;
}
} | class ReadTimeoutHandler extends ChannelInboundHandlerAdapter {
/**
* Name of the handler when it is added into a ChannelPipeline.
*/
public static final String HANDLER_NAME = "azureReadTimeoutHandler";
private static final String READ_TIMED_OUT_MESSAGE = "Channel read timed out after %d milliseconds.";
private final long timeoutMillis;
private boolean closed;
private long lastReadMillis;
private ScheduledFuture<?> readTimeoutWatcher;
/**
* Constructs a channel handler that watches channel read operations to ensure they aren't timing out.
*
* @param timeoutMillis The period of milliseconds when read progress has stopped before a channel is considered
* timed out.
*/
public ReadTimeoutHandler(long timeoutMillis) {
this.timeoutMillis = timeoutMillis;
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
this.lastReadMillis = System.currentTimeMillis();
ctx.fireChannelReadComplete();
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) {
if (timeoutMillis > 0) {
this.readTimeoutWatcher = ctx.executor().scheduleAtFixedRate(() -> readTimeoutRunnable(ctx),
timeoutMillis, timeoutMillis, TimeUnit.MILLISECONDS);
}
}
@Override
public void handlerRemoved(ChannelHandlerContext ctx) {
if (readTimeoutWatcher != null && !readTimeoutWatcher.isDone()) {
readTimeoutWatcher.cancel(false);
readTimeoutWatcher = null;
}
}
} | class ReadTimeoutHandler extends ChannelInboundHandlerAdapter {
/**
* Name of the handler when it is added into a ChannelPipeline.
*/
public static final String HANDLER_NAME = "azureReadTimeoutHandler";
private static final String READ_TIMED_OUT_MESSAGE = "Channel read timed out after %d milliseconds.";
private final long timeoutMillis;
private boolean closed;
private long lastReadMillis;
private ScheduledFuture<?> readTimeoutWatcher;
/**
* Constructs a channel handler that watches channel read operations to ensure they aren't timing out.
*
* @param timeoutMillis The period of milliseconds when read progress has stopped before a channel is considered
* timed out.
*/
public ReadTimeoutHandler(long timeoutMillis) {
this.timeoutMillis = timeoutMillis;
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
this.lastReadMillis = System.currentTimeMillis();
ctx.fireChannelReadComplete();
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) {
if (timeoutMillis > 0) {
this.readTimeoutWatcher = ctx.executor().scheduleAtFixedRate(() -> readTimeoutRunnable(ctx),
timeoutMillis, timeoutMillis, TimeUnit.MILLISECONDS);
}
}
@Override
public void handlerRemoved(ChannelHandlerContext ctx) {
if (readTimeoutWatcher != null && !readTimeoutWatcher.isDone()) {
readTimeoutWatcher.cancel(false);
readTimeoutWatcher = null;
}
}
} |
Should move this try/catch deeper into the call stack, either the public overload or in the package-private method so that we don't need to put it in multiple locations. | public PagedFlux<PathItem> listPaths() {
try {
return this.listPaths(false, false, null);
} catch (RuntimeException ex) {
return pagedFluxError(logger, ex);
}
} | } catch (RuntimeException ex) { | public PagedFlux<PathItem> listPaths() {
return this.listPaths(false, false, null);
} | class DataLakeDirectoryAsyncClient extends DataLakePathAsyncClient {
private final ClientLogger logger = new ClientLogger(DataLakeDirectoryAsyncClient.class);
/**
* Package-private constructor for use by {@link DataLakePathClientBuilder}.
*
* @param pipeline The pipeline used to send and receive service requests.
* @param url The endpoint where to send service requests.
* @param serviceVersion The version of the service to receive requests.
* @param accountName The storage account name.
* @param fileSystemName The file system name.
* @param directoryName The directory name.
* @param blockBlobAsyncClient The underlying {@link BlobContainerAsyncClient}
*/
DataLakeDirectoryAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion,
String accountName, String fileSystemName, String directoryName, BlockBlobAsyncClient blockBlobAsyncClient) {
super(pipeline, url, serviceVersion, accountName, fileSystemName, directoryName, PathResourceType.DIRECTORY,
blockBlobAsyncClient);
}
DataLakeDirectoryAsyncClient(DataLakePathAsyncClient dataLakePathAsyncClient) {
super(dataLakePathAsyncClient.getHttpPipeline(), dataLakePathAsyncClient.getPathUrl(),
dataLakePathAsyncClient.getServiceVersion(), dataLakePathAsyncClient.getAccountName(),
dataLakePathAsyncClient.getFileSystemName(), dataLakePathAsyncClient.pathName,
PathResourceType.DIRECTORY, dataLakePathAsyncClient.getBlockBlobAsyncClient());
}
/**
* Gets the URL of the directory represented by this client on the Data Lake service.
*
* @return the URL.
*/
public String getDirectoryUrl() {
return getPathUrl();
}
/**
* Gets the path of this directory, not including the name of the resource itself.
*
* @return The path of the directory.
*/
public String getDirectoryPath() {
return getObjectPath();
}
/**
* Gets the name of this directory, not including its full path.
*
* @return The name of the directory.
*/
public String getDirectoryName() {
return getObjectName();
}
/**
* Deletes a directory.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.delete}
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @return A reactive response signalling completion.
*/
/**
 * Deletes this directory (non-recursively, no request conditions).
 *
 * @return A reactive response signalling completion.
 */
public Mono<Void> delete() {
    try {
        // Drop the Response wrapper and surface only completion.
        Mono<Response<Void>> deletion = deleteWithResponse(false, null);
        return deletion.flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(logger, ex);
    }
}
/**
* Deletes a directory.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param recursive Whether or not to delete all paths beneath the directory.
* @param requestConditions {@link DataLakeRequestConditions}
*
* @return A reactive response signalling completion.
*/
public Mono<Response<Void>> deleteWithResponse(boolean recursive, DataLakeRequestConditions requestConditions) {
try {
return withContext(context -> deleteWithResponse(recursive, requestConditions, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Creates a new DataLakeFileAsyncClient object by concatenating fileName to the end of
* DataLakeDirectoryAsyncClient's URL. The new DataLakeFileAsyncClient uses the same request policy pipeline as the
* DataLakeDirectoryAsyncClient.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getFileAsyncClient
*
* @param fileName A {@code String} representing the name of the file.
* @return A new {@link DataLakeFileAsyncClient} object which references the file with the specified name in this
* file system.
*/
/**
 * Creates a new {@link DataLakeFileAsyncClient} for {@code fileName} under this directory,
 * sharing this client's request pipeline.
 *
 * @param fileName A {@code String} representing the name of the file.
 * @return A new {@link DataLakeFileAsyncClient} referencing the named file.
 */
public DataLakeFileAsyncClient getFileAsyncClient(String fileName) {
    Objects.requireNonNull(fileName, "'fileName' can not be set to null");
    // Decode once so the URL (re-encoded) and the object path agree on the normalized name.
    String decodedFileName = Utility.urlDecode(fileName);
    String fileUrl = StorageImplUtils.appendToUrlPath(getPathUrl(), Utility.urlEncode(decodedFileName)).toString();
    String filePath = getObjectPath() + "/" + decodedFileName;
    BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(fileName).buildBlockBlobAsyncClient();
    return new DataLakeFileAsyncClient(getHttpPipeline(), fileUrl, getServiceVersion(), getAccountName(),
        getFileSystemName(), filePath, blockBlobAsyncClient);
}
/**
* Creates a new file within a directory. By default this method will not overwrite an existing file.
* For more information, see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile
*
* @param fileName Name of the file to create.
* @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created.
*/
public Mono<DataLakeFileAsyncClient> createFile(String fileName) {
return createFile(fileName, false);
}
/**
* Creates a new file within a directory. For more information, see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile
*
* @param fileName Name of the file to create.
* @param overwrite Whether or not to overwrite, should the file exist.
* @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created.
*/
/**
 * Creates a new file within this directory, optionally overwriting an existing one.
 *
 * @param fileName Name of the file to create.
 * @param overwrite Whether or not to overwrite, should the file exist.
 * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created.
 */
public Mono<DataLakeFileAsyncClient> createFile(String fileName, boolean overwrite) {
    // When not overwriting, an If-None-Match: * condition makes creation fail if the file exists.
    DataLakeRequestConditions requestConditions = overwrite
        ? new DataLakeRequestConditions()
        : new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    try {
        return createFileWithResponse(fileName, null, null, null, null, requestConditions)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(logger, ex);
    }
}
/**
* Creates a new file within a directory. If a file with the same name already exists, the file will be
* overwritten. For more information, see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileWithResponse
*
* @param fileName Name of the file to create.
* @param permissions POSIX access permissions for the file owner, the file owning group, and others.
* @param umask Restricts permissions of the file to be created.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the file.
* @param requestConditions {@link DataLakeRequestConditions}
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* DataLakeFileAsyncClient} used to interact with the file created.
*/
public Mono<Response<DataLakeFileAsyncClient>> createFileWithResponse(String fileName, String permissions,
String umask, PathHttpHeaders headers, Map<String, String> metadata,
DataLakeRequestConditions requestConditions) {
try {
DataLakeFileAsyncClient dataLakeFileAsyncClient = getFileAsyncClient(fileName);
return dataLakeFileAsyncClient.createWithResponse(permissions, umask, headers, metadata, requestConditions)
.map(response -> new SimpleResponse<>(response, dataLakeFileAsyncClient));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Deletes the specified file in the file system. If the file doesn't exist the operation fails.
* For more information see the <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFile
*
* @param fileName Name of the file to delete.
* @return A reactive response signalling completion.
*/
/**
 * Deletes the named file in this directory; fails if the file does not exist.
 *
 * @param fileName Name of the file to delete.
 * @return A reactive response signalling completion.
 */
public Mono<Void> deleteFile(String fileName) {
    try {
        // No request conditions; drop the Response wrapper and surface only completion.
        Mono<Response<Void>> deletion = deleteFileWithResponse(fileName, null);
        return deletion.flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(logger, ex);
    }
}
/**
* Deletes the specified file in the directory. If the file doesn't exist the operation fails.
* For more information see the <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileWithResponse
*
* @param fileName Name of the file to delete.
* @param requestConditions {@link DataLakeRequestConditions}
* @return A {@link Mono} containing containing status code and HTTP headers
*/
public Mono<Response<Void>> deleteFileWithResponse(String fileName, DataLakeRequestConditions requestConditions) {
try {
return getFileAsyncClient(fileName).deleteWithResponse(requestConditions);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Creates a new DataLakeDirectoryAsyncClient object by concatenating subdirectoryName to the end of
* DataLakeDirectoryAsyncClient's URL. The new DataLakeDirectoryAsyncClient uses the same request policy pipeline
* as the DataLakeDirectoryAsyncClient.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getSubdirectoryAsyncClient
*
* @param subdirectoryName A {@code String} representing the name of the sub-directory.
* @return A new {@link DataLakeDirectoryAsyncClient} object which references the directory with the specified name
* in this file system.
*/
public DataLakeDirectoryAsyncClient getSubdirectoryAsyncClient(String subdirectoryName) {
Objects.requireNonNull(subdirectoryName, "'subdirectoryName' can not be set to null");
BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(subdirectoryName)
.buildBlockBlobAsyncClient();
return new DataLakeDirectoryAsyncClient(getHttpPipeline(),
StorageImplUtils.appendToUrlPath(getPathUrl(), Utility.urlEncode(Utility.urlDecode(subdirectoryName)))
.toString(), getServiceVersion(), getAccountName(), getFileSystemName(), getObjectPath() + "/"
+ Utility.urlDecode(subdirectoryName), blockBlobAsyncClient);
}
/**
* Creates a new sub-directory within a directory. By default this method will not overwrite an existing
* sub-directory. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory
*
* @param subdirectoryName Name of the sub-directory to create.
* @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory
* created.
*/
public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName) {
return createSubdirectory(subdirectoryName, false);
}
/**
* Creates a new sub-directory within a directory. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory
*
* @param subdirectoryName Name of the sub-directory to create.
* @param overwrite Whether or not to overwrite, should the sub directory exist.
* @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory
* created.
*/
/**
 * Creates a new sub-directory within this directory, optionally overwriting an existing one.
 *
 * @param subdirectoryName Name of the sub-directory to create.
 * @param overwrite Whether or not to overwrite, should the sub directory exist.
 * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the
 * directory created.
 */
public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName, boolean overwrite) {
    // When not overwriting, an If-None-Match: * condition makes creation fail if the directory exists.
    DataLakeRequestConditions requestConditions = overwrite
        ? new DataLakeRequestConditions()
        : new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    try {
        return createSubdirectoryWithResponse(subdirectoryName, null, null, null, null, requestConditions)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(logger, ex);
    }
}
/**
* Creates a new sub-directory within a directory. If a sub-directory with the same name already exists, the
* sub-directory will be overwritten. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryWithResponse
*
* @param subdirectoryName Name of the sub-directory to create.
* @param permissions POSIX access permissions for the sub-directory owner, the sub-directory owning group, and
* others.
* @param umask Restricts permissions of the sub-directory to be created.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the sub-directory.
* @param requestConditions {@link DataLakeRequestConditions}
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* DataLakeDirectoryAsyncClient} used to interact with the sub-directory created.
*/
public Mono<Response<DataLakeDirectoryAsyncClient>> createSubdirectoryWithResponse(String subdirectoryName,
String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata,
DataLakeRequestConditions requestConditions) {
try {
DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient = getSubdirectoryAsyncClient(subdirectoryName);
return dataLakeDirectoryAsyncClient.createWithResponse(permissions, umask, headers, metadata,
requestConditions).map(response -> new SimpleResponse<>(response, dataLakeDirectoryAsyncClient));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Deletes the specified sub-directory in the directory. If the sub-directory doesn't exist or is not empty the
* operation fails.
* For more information see the <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectory
*
* @param subdirectoryName Name of the sub-directory to delete.
* @return A reactive response signalling completion.
*/
public Mono<Void> deleteSubdirectory(String subdirectoryName) {
try {
return deleteSubdirectoryWithResponse(subdirectoryName, false, null).flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Deletes the specified sub-directory in the directory. If the sub-directory doesn't exist or is not empty the
* operation fails.
* For more information see the <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryWithResponse
*
* @param directoryName Name of the sub-directory to delete.
* @param recursive Whether or not to delete all paths beneath the sub-directory.
* @param requestConditions {@link DataLakeRequestConditions}
* @return A {@link Mono} containing containing status code and HTTP headers
*/
public Mono<Response<Void>> deleteSubdirectoryWithResponse(String directoryName, boolean recursive,
DataLakeRequestConditions requestConditions) {
try {
return getSubdirectoryAsyncClient(directoryName).deleteWithResponse(recursive, requestConditions);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Moves the directory to another location within the file system.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the directory to, excludes the file system
* name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to
* another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir"
* @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the new directory
* created.
*/
public Mono<DataLakeDirectoryAsyncClient> rename(String destinationFileSystem, String destinationPath) {
try {
return renameWithResponse(destinationFileSystem, destinationPath, null, null).flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Moves the directory to another location within the file system.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.renameWithResponse
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the directory to, excludes the file system
* name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to
* another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* DataLakeDirectoryAsyncClient} used to interact with the directory created.
*/
public Mono<Response<DataLakeDirectoryAsyncClient>> renameWithResponse(String destinationFileSystem,
String destinationPath, DataLakeRequestConditions sourceRequestConditions,
DataLakeRequestConditions destinationRequestConditions) {
try {
return withContext(context -> renameWithResponse(destinationFileSystem, destinationPath,
sourceRequestConditions, destinationRequestConditions, context)).map(
response -> new SimpleResponse<>(response, new DataLakeDirectoryAsyncClient(response.getValue())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Returns a reactive Publisher emitting all the files/directories in this account lazily as needed. For more
* information, see the <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.listPaths}
*
* @return A reactive response emitting the list of files/directories.
*/
/**
 * Returns a reactive Publisher emitting the files/directories under this directory lazily as
 * pages are needed.
 *
 * @param recursive Whether to also list the contents of all subdirectories, not just the
 * immediate children.
 * @param userPrincipleNameReturned Whether owner/group identity values in the results are
 * returned as user principal names instead of object IDs — presumably maps to the service's
 * "upn" query parameter; confirm against the REST API.
 * @param maxResults Maximum number of paths to return per page; {@code null} uses the service
 * default.
 * @return A reactive response emitting the list of files/directories.
 */
public PagedFlux<PathItem> listPaths(boolean recursive, boolean userPrincipleNameReturned, Integer maxResults) {
    return listPathsWithOptionalTimeout(recursive, userPrincipleNameReturned, maxResults, null);
}
/**
 * Builds the paged stream of path items backing {@link #listPaths(boolean, boolean, Integer)},
 * applying an optional per-request timeout to each page call.
 *
 * @param recursive Whether to also list the contents of all subdirectories.
 * @param userPrincipleNameReturned Whether identity values are returned as user principal names.
 * @param maxResults Maximum number of paths per page; {@code null} uses the service default.
 * @param timeout Optional timeout applied to each page request; {@code null} for no timeout.
 * @return A {@link PagedFlux} of {@link PathItem}.
 */
PagedFlux<PathItem> listPathsWithOptionalTimeout(boolean recursive, boolean userPrincipleNameReturned,
    Integer maxResults, Duration timeout) {
    // Page fetcher: 'marker' is the continuation token of the previous page (null for the first page).
    Function<String, Mono<PagedResponse<Path>>> func =
        marker -> listPathsSegment(marker, recursive, userPrincipleNameReturned, maxResults, timeout)
            .map(response -> new PagedResponseBase<>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                response.getValue().getPaths(),
                // The continuation header drives retrieval of the next page.
                response.getDeserializedHeaders().getContinuation(),
                response.getDeserializedHeaders()));
    // Same fetcher serves first page (null marker) and subsequent pages; convert the
    // generated-model Path items to the public PathItem type page by page.
    return new PagedFlux<>(() -> func.apply(null), func).mapPage(Transforms::toPathItem);
}
/**
 * Issues a single "list paths" service call for one page of results, scoped to this directory.
 *
 * @param marker Continuation token from the previous page, or {@code null} for the first page.
 * @param recursive Whether to also list the contents of all subdirectories.
 * @param userPrincipleNameReturned Whether identity values are returned as user principal names.
 * @param maxResults Maximum number of paths to return in this page; {@code null} for the service default.
 * @param timeout Optional timeout applied to the request; {@code null} for no timeout.
 * @return A {@link Mono} emitting the raw service response for this page.
 */
private Mono<FileSystemsListPathsResponse> listPathsSegment(String marker, boolean recursive,
    boolean userPrincipleNameReturned, Integer maxResults, Duration timeout) {
    // getDirectoryPath() scopes the listing to this directory rather than the whole file system.
    return StorageImplUtils.applyOptionalTimeout(
        this.fileSystemDataLakeStorage.fileSystems().listPathsWithRestResponseAsync(
            recursive, marker, getDirectoryPath(), maxResults, userPrincipleNameReturned, null,
            null, Context.NONE), timeout);
}
/**
* Prepares a SpecializedBlobClientBuilder with the pathname appended to the end of the current BlockBlobClient's
* url
* @param pathName The name of the path to append
* @return {@link SpecializedBlobClientBuilder}
*/
/**
 * Prepares a {@link SpecializedBlobClientBuilder} targeting {@code pathName} appended to this
 * client's blob-endpoint URL, reusing the current request pipeline.
 *
 * @param pathName The name of the path to append.
 * @return {@link SpecializedBlobClientBuilder}
 */
SpecializedBlobClientBuilder prepareBuilderAppendPath(String pathName) {
    // Translate the "dfs" endpoint into its "blob" twin before appending the path.
    String blobEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(getPathUrl(), "blob", "dfs");
    String fullEndpoint = StorageImplUtils.appendToUrlPath(blobEndpoint, pathName).toString();
    return new SpecializedBlobClientBuilder()
        .pipeline(getHttpPipeline())
        .endpoint(fullEndpoint);
}
} | class DataLakeDirectoryAsyncClient extends DataLakePathAsyncClient {
private final ClientLogger logger = new ClientLogger(DataLakeDirectoryAsyncClient.class);
/**
* Package-private constructor for use by {@link DataLakePathClientBuilder}.
*
* @param pipeline The pipeline used to send and receive service requests.
* @param url The endpoint where to send service requests.
* @param serviceVersion The version of the service to receive requests.
* @param accountName The storage account name.
* @param fileSystemName The file system name.
* @param directoryName The directory name.
* @param blockBlobAsyncClient The underlying {@link BlobContainerAsyncClient}
*/
DataLakeDirectoryAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion,
String accountName, String fileSystemName, String directoryName, BlockBlobAsyncClient blockBlobAsyncClient) {
super(pipeline, url, serviceVersion, accountName, fileSystemName, directoryName, PathResourceType.DIRECTORY,
blockBlobAsyncClient);
}
DataLakeDirectoryAsyncClient(DataLakePathAsyncClient dataLakePathAsyncClient) {
super(dataLakePathAsyncClient.getHttpPipeline(), dataLakePathAsyncClient.getPathUrl(),
dataLakePathAsyncClient.getServiceVersion(), dataLakePathAsyncClient.getAccountName(),
dataLakePathAsyncClient.getFileSystemName(), dataLakePathAsyncClient.pathName,
PathResourceType.DIRECTORY, dataLakePathAsyncClient.getBlockBlobAsyncClient());
}
/**
* Gets the URL of the directory represented by this client on the Data Lake service.
*
* @return the URL.
*/
public String getDirectoryUrl() {
return getPathUrl();
}
/**
* Gets the path of this directory, not including the name of the resource itself.
*
* @return The path of the directory.
*/
public String getDirectoryPath() {
return getObjectPath();
}
/**
* Gets the name of this directory, not including its full path.
*
* @return The name of the directory.
*/
public String getDirectoryName() {
return getObjectName();
}
/**
* Deletes a directory.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.delete}
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @return A reactive response signalling completion.
*/
public Mono<Void> delete() {
try {
return deleteWithResponse(false, null).flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Deletes a directory.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param recursive Whether or not to delete all paths beneath the directory.
* @param requestConditions {@link DataLakeRequestConditions}
*
* @return A reactive response signalling completion.
*/
public Mono<Response<Void>> deleteWithResponse(boolean recursive, DataLakeRequestConditions requestConditions) {
try {
return withContext(context -> deleteWithResponse(recursive, requestConditions, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Creates a new DataLakeFileAsyncClient object by concatenating fileName to the end of
* DataLakeDirectoryAsyncClient's URL. The new DataLakeFileAsyncClient uses the same request policy pipeline as the
* DataLakeDirectoryAsyncClient.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getFileAsyncClient}
*
* @param fileName A {@code String} representing the name of the file.
* @return A new {@link DataLakeFileAsyncClient} object which references the file with the specified name in this
* file system.
*/
public DataLakeFileAsyncClient getFileAsyncClient(String fileName) {
Objects.requireNonNull(fileName, "'fileName' can not be set to null");
// Build the blob-endpoint counterpart client that backs this file client.
BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(fileName).buildBlockBlobAsyncClient();
// urlDecode before urlEncode normalizes names that may already be percent-encoded,
// so an encoded caller-supplied name is not double-encoded in the URL.
return new DataLakeFileAsyncClient(getHttpPipeline(),
StorageImplUtils.appendToUrlPath(getPathUrl(), Utility.urlEncode(Utility.urlDecode(fileName))).toString(),
getServiceVersion(), getAccountName(), getFileSystemName(), getObjectPath() + "/"
+ Utility.urlDecode(fileName), blockBlobAsyncClient);
}
/**
 * Creates a new file within this directory. By default this method will not overwrite an existing file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile}
 *
 * @param fileName Name of the file to create.
 * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created.
 */
public Mono<DataLakeFileAsyncClient> createFile(String fileName) {
    // Delegate with overwrite disabled so an existing file is never clobbered.
    return createFile(fileName, false);
}
/**
 * Creates a new file within this directory, optionally overwriting an existing one.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile}
 *
 * @param fileName Name of the file to create.
 * @param overwrite Whether or not to overwrite, should the file exist.
 * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created.
 */
public Mono<DataLakeFileAsyncClient> createFile(String fileName, boolean overwrite) {
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // If-None-Match: * makes the service reject the create when the file already exists.
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    try {
        return createFileWithResponse(fileName, null, null, null, null, conditions)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Creates a new file within this directory. If a file with the same name already exists, it is overwritten
 * unless {@code requestConditions} prevents it.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileWithResponse}
 *
 * @param fileName Name of the file to create.
 * @param permissions POSIX access permissions for the file owner, the file owning group, and others.
 * @param umask Restricts permissions of the file to be created.
 * @param headers {@link PathHttpHeaders}
 * @param metadata Metadata to associate with the file.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @return A {@link Mono} containing a {@link Response} whose value contains a {@link
 * DataLakeFileAsyncClient} used to interact with the file created.
 */
public Mono<Response<DataLakeFileAsyncClient>> createFileWithResponse(String fileName, String permissions,
    String umask, PathHttpHeaders headers, Map<String, String> metadata,
    DataLakeRequestConditions requestConditions) {
    try {
        DataLakeFileAsyncClient fileClient = getFileAsyncClient(fileName);
        // Issue the create call, then hand back the pre-built client alongside the raw response.
        return fileClient
            .createWithResponse(permissions, umask, headers, metadata, requestConditions)
            .map(created -> new SimpleResponse<>(created, fileClient));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Deletes the specified file in this directory. If the file doesn't exist the operation fails.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFile}
 *
 * @param fileName Name of the file to delete.
 * @return A reactive response signalling completion.
 */
public Mono<Void> deleteFile(String fileName) {
    try {
        Mono<Response<Void>> withResponse = deleteFileWithResponse(fileName, null);
        return withResponse.flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Deletes the specified file in this directory. If the file doesn't exist the operation fails.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileWithResponse}
 *
 * @param fileName Name of the file to delete.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @return A {@link Mono} containing status code and HTTP headers.
 */
public Mono<Response<Void>> deleteFileWithResponse(String fileName, DataLakeRequestConditions requestConditions) {
    try {
        DataLakeFileAsyncClient fileClient = getFileAsyncClient(fileName);
        return fileClient.deleteWithResponse(requestConditions);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Creates a new DataLakeDirectoryAsyncClient object by concatenating subdirectoryName to the end of
* DataLakeDirectoryAsyncClient's URL. The new DataLakeDirectoryAsyncClient uses the same request policy pipeline
* as the DataLakeDirectoryAsyncClient.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getSubdirectoryAsyncClient}
*
* @param subdirectoryName A {@code String} representing the name of the sub-directory.
* @return A new {@link DataLakeDirectoryAsyncClient} object which references the directory with the specified name
* in this file system.
*/
public DataLakeDirectoryAsyncClient getSubdirectoryAsyncClient(String subdirectoryName) {
Objects.requireNonNull(subdirectoryName, "'subdirectoryName' can not be set to null");
// Build the blob-endpoint counterpart client that backs this directory client.
BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(subdirectoryName)
.buildBlockBlobAsyncClient();
// urlDecode before urlEncode normalizes names that may already be percent-encoded,
// so an encoded caller-supplied name is not double-encoded in the URL.
return new DataLakeDirectoryAsyncClient(getHttpPipeline(),
StorageImplUtils.appendToUrlPath(getPathUrl(), Utility.urlEncode(Utility.urlDecode(subdirectoryName)))
.toString(), getServiceVersion(), getAccountName(), getFileSystemName(), getObjectPath() + "/"
+ Utility.urlDecode(subdirectoryName), blockBlobAsyncClient);
}
/**
 * Creates a new sub-directory within this directory. By default this method will not overwrite an
 * existing sub-directory.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory}
 *
 * @param subdirectoryName Name of the sub-directory to create.
 * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory
 * created.
 */
public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName) {
    // Delegate with overwrite disabled so an existing sub-directory is never clobbered.
    return createSubdirectory(subdirectoryName, false);
}
/**
 * Creates a new sub-directory within this directory, optionally overwriting an existing one.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory}
 *
 * @param subdirectoryName Name of the sub-directory to create.
 * @param overwrite Whether or not to overwrite, should the sub directory exist.
 * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory
 * created.
 */
public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName, boolean overwrite) {
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // If-None-Match: * makes the service reject the create when the directory already exists.
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    try {
        return createSubdirectoryWithResponse(subdirectoryName, null, null, null, null, conditions)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Creates a new sub-directory within this directory. If a sub-directory with the same name already exists,
 * it is overwritten unless {@code requestConditions} prevents it.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryWithResponse}
 *
 * @param subdirectoryName Name of the sub-directory to create.
 * @param permissions POSIX access permissions for the sub-directory owner, the sub-directory owning group, and
 * others.
 * @param umask Restricts permissions of the sub-directory to be created.
 * @param headers {@link PathHttpHeaders}
 * @param metadata Metadata to associate with the sub-directory.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @return A {@link Mono} containing a {@link Response} whose value contains a {@link
 * DataLakeDirectoryAsyncClient} used to interact with the sub-directory created.
 */
public Mono<Response<DataLakeDirectoryAsyncClient>> createSubdirectoryWithResponse(String subdirectoryName,
    String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata,
    DataLakeRequestConditions requestConditions) {
    try {
        DataLakeDirectoryAsyncClient subdirectoryClient = getSubdirectoryAsyncClient(subdirectoryName);
        // Issue the create call, then hand back the pre-built client alongside the raw response.
        return subdirectoryClient
            .createWithResponse(permissions, umask, headers, metadata, requestConditions)
            .map(created -> new SimpleResponse<>(created, subdirectoryClient));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Deletes the specified sub-directory in this directory. If the sub-directory doesn't exist or is not
 * empty the operation fails.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectory}
 *
 * @param subdirectoryName Name of the sub-directory to delete.
 * @return A reactive response signalling completion.
 */
public Mono<Void> deleteSubdirectory(String subdirectoryName) {
    try {
        // Non-recursive delete with no request conditions; unwrap the Response envelope.
        Mono<Response<Void>> withResponse = deleteSubdirectoryWithResponse(subdirectoryName, false, null);
        return withResponse.flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Deletes the specified sub-directory in this directory. If the sub-directory doesn't exist or is not
 * empty the operation fails.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryWithResponse}
 *
 * @param directoryName Name of the sub-directory to delete.
 * @param recursive Whether or not to delete all paths beneath the sub-directory.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @return A {@link Mono} containing status code and HTTP headers.
 */
public Mono<Response<Void>> deleteSubdirectoryWithResponse(String directoryName, boolean recursive,
    DataLakeRequestConditions requestConditions) {
    try {
        DataLakeDirectoryAsyncClient subdirectoryClient = getSubdirectoryAsyncClient(directoryName);
        return subdirectoryClient.deleteWithResponse(recursive, requestConditions);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Moves the directory to another location within the file system.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename}
 *
 * @param destinationFileSystem The file system of the destination within the account.
 * {@code null} for the current file system.
 * @param destinationPath Relative path from the file system to rename the directory to, excludes the file system
 * name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to
 * another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir"
 * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the new directory
 * created.
 */
public Mono<DataLakeDirectoryAsyncClient> rename(String destinationFileSystem, String destinationPath) {
    try {
        // No request conditions on source or destination; unwrap the Response envelope.
        return renameWithResponse(destinationFileSystem, destinationPath, null, null)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Moves the directory to another location within the file system.
* For more information, see the Azure Docs.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.renameWithResponse}
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the directory to, excludes the file system
* name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to
* another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @return A {@link Mono} containing a {@link Response} whose value contains a {@link
* DataLakeDirectoryAsyncClient} used to interact with the directory created.
*/
public Mono<Response<DataLakeDirectoryAsyncClient>> renameWithResponse(String destinationFileSystem,
String destinationPath, DataLakeRequestConditions sourceRequestConditions,
DataLakeRequestConditions destinationRequestConditions) {
try {
// Wrap the renamed path client from the response value in a fresh directory client
// so the caller interacts with the destination location.
return withContext(context -> renameWithResponse(destinationFileSystem, destinationPath,
sourceRequestConditions, destinationRequestConditions, context)).map(
response -> new SimpleResponse<>(response, new DataLakeDirectoryAsyncClient(response.getValue())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Returns a reactive Publisher emitting all the files/directories in this directory lazily as needed. For more
* information, see the <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths}
*
* @return A reactive response emitting the list of files/directories.
*/
/**
 * Returns a reactive Publisher emitting all the files/directories in this directory lazily as needed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths}
 *
 * @param recursive Specifies if the call should recursively include all paths.
 * @param userPrincipleNameReturned If "true", the user identity values returned in the x-ms-owner, x-ms-group,
 * and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User Principal Names.
 * If "false", the values will be returned as Azure Active Directory Object IDs.
 * The default value is false. Note that group and application Object IDs are not translated because they do not
 * have unique friendly names.
 * @param maxResults Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the
 * request does not specify maxResults or specifies a value greater than 5,000, the server will return up to
 * 5,000 items.
 * @return A reactive response emitting the list of files/directories.
 */
public PagedFlux<PathItem> listPaths(boolean recursive, boolean userPrincipleNameReturned, Integer maxResults) {
    try {
        // The public overload never applies a timeout; that option is internal-only.
        return listPathsWithOptionalTimeout(recursive, userPrincipleNameReturned, maxResults, null);
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}
/**
* Package-private variant of {@code listPaths} that threads an optional per-call timeout through
* to each paged service round trip.
*
* @param recursive Specifies if the call should recursively include all paths.
* @param userPrincipleNameReturned Whether owner/group/ACL identities are returned as User Principal Names.
* @param maxResults Maximum number of items per page; per the service, capped at 5,000.
* @param timeout Optional timeout applied to each segment request; {@code null} applies none.
* @return A reactive response emitting the list of files/directories.
*/
PagedFlux<PathItem> listPathsWithOptionalTimeout(boolean recursive, boolean userPrincipleNameReturned,
Integer maxResults, Duration timeout) {
// Maps a continuation marker to one page: fetch the raw segment, then rewrap it as a
// PagedResponse whose continuation token comes from the response headers.
Function<String, Mono<PagedResponse<Path>>> func =
marker -> listPathsSegment(marker, recursive, userPrincipleNameReturned, maxResults, timeout)
.map(response -> new PagedResponseBase<>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
response.getValue().getPaths(),
response.getDeserializedHeaders().getContinuation(),
response.getDeserializedHeaders()));
// First page uses a null marker; later pages reuse func with the marker from the prior page.
// Each internal Path is converted to the public PathItem type.
return new PagedFlux<>(() -> func.apply(null), func).mapPage(Transforms::toPathItem);
}
/**
* Issues a single list-paths service call for one page of results starting at {@code marker}.
*
* @param marker Continuation marker identifying the page to fetch; {@code null} for the first page.
* @param recursive Specifies if the call should recursively include all paths.
* @param userPrincipleNameReturned Whether owner/group/ACL identities are returned as User Principal Names.
* @param maxResults Maximum number of items to return in this page.
* @param timeout Optional timeout for this single call; {@code null} applies none — TODO confirm
* applyOptionalTimeout is a no-op for null.
* @return The raw list-paths REST response for this segment.
*/
private Mono<FileSystemsListPathsResponse> listPathsSegment(String marker, boolean recursive,
boolean userPrincipleNameReturned, Integer maxResults, Duration timeout) {
// Scopes the listing to this directory via getDirectoryPath().
return StorageImplUtils.applyOptionalTimeout(
this.fileSystemDataLakeStorage.fileSystems().listPathsWithRestResponseAsync(
recursive, marker, getDirectoryPath(), maxResults, userPrincipleNameReturned, null,
null, Context.NONE), timeout);
}
/**
 * Prepares a SpecializedBlobClientBuilder whose endpoint is the current client's URL, switched to the
 * blob endpoint, with {@code pathName} appended.
 *
 * @param pathName The name of the path to append.
 * @return {@link SpecializedBlobClientBuilder}
 */
SpecializedBlobClientBuilder prepareBuilderAppendPath(String pathName) {
    // Swap the "dfs" endpoint for its "blob" counterpart before appending the new path segment.
    String blobEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(getPathUrl(), "blob", "dfs");
    SpecializedBlobClientBuilder builder = new SpecializedBlobClientBuilder()
        .pipeline(getHttpPipeline());
    return builder.endpoint(StorageImplUtils.appendToUrlPath(blobEndpoint, pathName).toString());
}
} |
This seems like an unfortunate perf regression. Why wouldn't the value be stored as a Boolean in the Context? | public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = context.getData("eagerly-read-response")
.map(data -> Boolean.parseBoolean(data.toString()))
.orElse(false);
return toJdkHttpRequest(request)
.flatMap(jdkRequest -> Mono.fromCompletionStage(jdkHttpClient.sendAsync(jdkRequest, ofPublisher()))
.flatMap(innerResponse -> {
if (eagerlyReadResponse) {
int statusCode = innerResponse.statusCode();
HttpHeaders headers = fromJdkHttpHeaders(innerResponse.headers());
return FluxUtil.collectBytesInByteBufferStream(JdkFlowAdapter
.flowPublisherToFlux(innerResponse.body())
.flatMapSequential(Flux::fromIterable))
.map(bytes -> new BufferedJdkHttpResponse(request, statusCode, headers, bytes));
} else {
return Mono.just(new JdkHttpResponse(request, innerResponse));
}
}));
} | .orElse(false); | public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(EAGERLY_READ_RESPONSE_CONTEXT_KEY).orElse(false);
return toJdkHttpRequest(request)
.flatMap(jdkRequest -> Mono.fromCompletionStage(jdkHttpClient.sendAsync(jdkRequest, ofPublisher()))
.flatMap(innerResponse -> {
if (eagerlyReadResponse) {
int statusCode = innerResponse.statusCode();
HttpHeaders headers = fromJdkHttpHeaders(innerResponse.headers());
return FluxUtil.collectBytesInByteBufferStream(JdkFlowAdapter
.flowPublisherToFlux(innerResponse.body())
.flatMapSequential(Flux::fromIterable))
.map(bytes -> new BufferedJdkHttpResponse(request, statusCode, headers, bytes));
} else {
return Mono.just(new JdkHttpResponse(request, innerResponse));
}
}));
} | class JdkAsyncHttpClient implements HttpClient {
private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClient.class);
private final java.net.http.HttpClient jdkHttpClient;
private final Set<String> restrictedHeaders;
JdkAsyncHttpClient(java.net.http.HttpClient httpClient, Set<String> restrictedHeaders) {
this.jdkHttpClient = httpClient;
int javaVersion = getJavaVersion();
if (javaVersion <= 11) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("JdkAsyncHttpClient is not supported in Java version 11 and below."));
}
this.restrictedHeaders = restrictedHeaders;
logger.verbose("Effective restricted headers: {}", restrictedHeaders);
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
/**
* Converts the given azure-core request to the JDK HttpRequest type.
*
* @param request the azure-core request
* @return the Mono emitting HttpRequest
*/
private Mono<java.net.http.HttpRequest> toJdkHttpRequest(HttpRequest request) {
return Mono.fromCallable(() -> {
final java.net.http.HttpRequest.Builder builder = java.net.http.HttpRequest.newBuilder();
try {
builder.uri(request.getUrl().toURI());
} catch (URISyntaxException e) {
throw logger.logExceptionAsError(Exceptions.propagate(e));
}
final HttpHeaders headers = request.getHeaders();
if (headers != null) {
for (HttpHeader header : headers) {
final String headerName = header.getName();
if (!restrictedHeaders.contains(headerName)) {
final String headerValue = header.getValue();
builder.setHeader(headerName, headerValue);
} else {
logger.warning("The header '" + headerName + "' is restricted by default in JDK HttpClient 12 "
+ "and above. This header can be added to allow list in JAVA_HOME/conf/net.properties "
+ "or in System.setProperty() or in Configuration. Use the key 'jdk.httpclient"
+ ".allowRestrictedHeaders' and a comma separated list of header names.");
}
}
}
switch (request.getHttpMethod()) {
case GET:
return builder.GET().build();
case HEAD:
return builder.method("HEAD", noBody()).build();
default:
final String contentLength = request.getHeaders().getValue("content-length");
final BodyPublisher bodyPublisher = toBodyPublisher(request.getBody(), contentLength);
return builder.method(request.getHttpMethod().toString(), bodyPublisher).build();
}
});
}
/**
* Create BodyPublisher from the given java.nio.ByteBuffer publisher.
*
* @param bbPublisher stream of java.nio.ByteBuffer representing request content
* @return the request BodyPublisher
*/
private static BodyPublisher toBodyPublisher(Flux<ByteBuffer> bbPublisher, String contentLength) {
if (bbPublisher == null) {
return noBody();
}
final Flow.Publisher<ByteBuffer> bbFlowPublisher = JdkFlowAdapter.publisherToFlowPublisher(bbPublisher);
if (CoreUtils.isNullOrEmpty(contentLength)) {
return fromPublisher(bbFlowPublisher);
} else {
long contentLengthLong = Long.parseLong(contentLength);
if (contentLengthLong < 1) {
return fromPublisher(bbFlowPublisher);
} else {
return fromPublisher(bbFlowPublisher, contentLengthLong);
}
}
}
/**
* Get the java runtime major version.
*
* @return the java major version
*/
private int getJavaVersion() {
String version = System.getProperty("java.version");
if (CoreUtils.isNullOrEmpty(version)) {
throw logger.logExceptionAsError(new RuntimeException("Can't find 'java.version' system property."));
}
if (version.startsWith("1.")) {
if (version.length() < 3) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version));
}
try {
return Integer.parseInt(version.substring(2, 3));
} catch (Throwable t) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t));
}
} else {
int idx = version.indexOf(".");
if (idx == -1) {
return Integer.parseInt(version);
}
try {
return Integer.parseInt(version.substring(0, idx));
} catch (Throwable t) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t));
}
}
}
/**
* Converts the given JDK Http headers to azure-core Http header.
*
* @param headers the JDK Http headers
* @return the azure-core Http headers
*/
static HttpHeaders fromJdkHttpHeaders(java.net.http.HttpHeaders headers) {
final HttpHeaders httpHeaders = new HttpHeaders();
for (final String key : headers.map().keySet()) {
final List<String> values = headers.allValues(key);
if (CoreUtils.isNullOrEmpty(values)) {
continue;
}
httpHeaders.put(key, values.size() == 1 ? values.get(0) : String.join(",", values));
}
return httpHeaders;
}
} | class JdkAsyncHttpClient implements HttpClient {
private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClient.class);
private final java.net.http.HttpClient jdkHttpClient;
private final Set<String> restrictedHeaders;
JdkAsyncHttpClient(java.net.http.HttpClient httpClient, Set<String> restrictedHeaders) {
this.jdkHttpClient = httpClient;
int javaVersion = getJavaVersion();
if (javaVersion <= 11) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("JdkAsyncHttpClient is not supported in Java version 11 and below."));
}
this.restrictedHeaders = restrictedHeaders;
logger.verbose("Effective restricted headers: {}", restrictedHeaders);
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
/**
* Converts the given azure-core request to the JDK HttpRequest type.
*
* @param request the azure-core request
* @return the Mono emitting HttpRequest
*/
private Mono<java.net.http.HttpRequest> toJdkHttpRequest(HttpRequest request) {
return Mono.fromCallable(() -> {
final java.net.http.HttpRequest.Builder builder = java.net.http.HttpRequest.newBuilder();
try {
builder.uri(request.getUrl().toURI());
} catch (URISyntaxException e) {
throw logger.logExceptionAsError(Exceptions.propagate(e));
}
final HttpHeaders headers = request.getHeaders();
if (headers != null) {
for (HttpHeader header : headers) {
final String headerName = header.getName();
if (!restrictedHeaders.contains(headerName)) {
final String headerValue = header.getValue();
builder.setHeader(headerName, headerValue);
} else {
logger.warning("The header '" + headerName + "' is restricted by default in JDK HttpClient 12 "
+ "and above. This header can be added to allow list in JAVA_HOME/conf/net.properties "
+ "or in System.setProperty() or in Configuration. Use the key 'jdk.httpclient"
+ ".allowRestrictedHeaders' and a comma separated list of header names.");
}
}
}
switch (request.getHttpMethod()) {
case GET:
return builder.GET().build();
case HEAD:
return builder.method("HEAD", noBody()).build();
default:
final String contentLength = request.getHeaders().getValue("content-length");
final BodyPublisher bodyPublisher = toBodyPublisher(request.getBody(), contentLength);
return builder.method(request.getHttpMethod().toString(), bodyPublisher).build();
}
});
}
/**
* Create BodyPublisher from the given java.nio.ByteBuffer publisher.
*
* @param bbPublisher stream of java.nio.ByteBuffer representing request content
* @return the request BodyPublisher
*/
private static BodyPublisher toBodyPublisher(Flux<ByteBuffer> bbPublisher, String contentLength) {
if (bbPublisher == null) {
return noBody();
}
final Flow.Publisher<ByteBuffer> bbFlowPublisher = JdkFlowAdapter.publisherToFlowPublisher(bbPublisher);
if (CoreUtils.isNullOrEmpty(contentLength)) {
return fromPublisher(bbFlowPublisher);
} else {
long contentLengthLong = Long.parseLong(contentLength);
if (contentLengthLong < 1) {
return fromPublisher(bbFlowPublisher);
} else {
return fromPublisher(bbFlowPublisher, contentLengthLong);
}
}
}
/**
* Get the java runtime major version.
*
* @return the java major version
*/
private int getJavaVersion() {
String version = System.getProperty("java.version");
if (CoreUtils.isNullOrEmpty(version)) {
throw logger.logExceptionAsError(new RuntimeException("Can't find 'java.version' system property."));
}
if (version.startsWith("1.")) {
if (version.length() < 3) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version));
}
try {
return Integer.parseInt(version.substring(2, 3));
} catch (Throwable t) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t));
}
} else {
int idx = version.indexOf(".");
if (idx == -1) {
return Integer.parseInt(version);
}
try {
return Integer.parseInt(version.substring(0, idx));
} catch (Throwable t) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t));
}
}
}
/**
* Converts the given JDK Http headers to azure-core Http header.
*
* @param headers the JDK Http headers
* @return the azure-core Http headers
*/
static HttpHeaders fromJdkHttpHeaders(java.net.http.HttpHeaders headers) {
final HttpHeaders httpHeaders = new HttpHeaders();
for (final String key : headers.map().keySet()) {
final List<String> values = headers.allValues(key);
if (CoreUtils.isNullOrEmpty(values)) {
continue;
}
httpHeaders.put(key, values.size() == 1 ? values.get(0) : String.join(",", values));
}
return httpHeaders;
}
} |
Updated | public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = context.getData("eagerly-read-response")
.map(data -> Boolean.parseBoolean(data.toString()))
.orElse(false);
return toJdkHttpRequest(request)
.flatMap(jdkRequest -> Mono.fromCompletionStage(jdkHttpClient.sendAsync(jdkRequest, ofPublisher()))
.flatMap(innerResponse -> {
if (eagerlyReadResponse) {
int statusCode = innerResponse.statusCode();
HttpHeaders headers = fromJdkHttpHeaders(innerResponse.headers());
return FluxUtil.collectBytesInByteBufferStream(JdkFlowAdapter
.flowPublisherToFlux(innerResponse.body())
.flatMapSequential(Flux::fromIterable))
.map(bytes -> new BufferedJdkHttpResponse(request, statusCode, headers, bytes));
} else {
return Mono.just(new JdkHttpResponse(request, innerResponse));
}
}));
} | .orElse(false); | public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(EAGERLY_READ_RESPONSE_CONTEXT_KEY).orElse(false);
return toJdkHttpRequest(request)
.flatMap(jdkRequest -> Mono.fromCompletionStage(jdkHttpClient.sendAsync(jdkRequest, ofPublisher()))
.flatMap(innerResponse -> {
if (eagerlyReadResponse) {
int statusCode = innerResponse.statusCode();
HttpHeaders headers = fromJdkHttpHeaders(innerResponse.headers());
return FluxUtil.collectBytesInByteBufferStream(JdkFlowAdapter
.flowPublisherToFlux(innerResponse.body())
.flatMapSequential(Flux::fromIterable))
.map(bytes -> new BufferedJdkHttpResponse(request, statusCode, headers, bytes));
} else {
return Mono.just(new JdkHttpResponse(request, innerResponse));
}
}));
} | class JdkAsyncHttpClient implements HttpClient {
private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClient.class);
private final java.net.http.HttpClient jdkHttpClient;
private final Set<String> restrictedHeaders;
JdkAsyncHttpClient(java.net.http.HttpClient httpClient, Set<String> restrictedHeaders) {
this.jdkHttpClient = httpClient;
int javaVersion = getJavaVersion();
if (javaVersion <= 11) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("JdkAsyncHttpClient is not supported in Java version 11 and below."));
}
this.restrictedHeaders = restrictedHeaders;
logger.verbose("Effective restricted headers: {}", restrictedHeaders);
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
/**
* Converts the given azure-core request to the JDK HttpRequest type.
*
* @param request the azure-core request
* @return the Mono emitting HttpRequest
*/
private Mono<java.net.http.HttpRequest> toJdkHttpRequest(HttpRequest request) {
return Mono.fromCallable(() -> {
final java.net.http.HttpRequest.Builder builder = java.net.http.HttpRequest.newBuilder();
try {
builder.uri(request.getUrl().toURI());
} catch (URISyntaxException e) {
throw logger.logExceptionAsError(Exceptions.propagate(e));
}
final HttpHeaders headers = request.getHeaders();
if (headers != null) {
for (HttpHeader header : headers) {
final String headerName = header.getName();
if (!restrictedHeaders.contains(headerName)) {
final String headerValue = header.getValue();
builder.setHeader(headerName, headerValue);
} else {
logger.warning("The header '" + headerName + "' is restricted by default in JDK HttpClient 12 "
+ "and above. This header can be added to allow list in JAVA_HOME/conf/net.properties "
+ "or in System.setProperty() or in Configuration. Use the key 'jdk.httpclient"
+ ".allowRestrictedHeaders' and a comma separated list of header names.");
}
}
}
switch (request.getHttpMethod()) {
case GET:
return builder.GET().build();
case HEAD:
return builder.method("HEAD", noBody()).build();
default:
final String contentLength = request.getHeaders().getValue("content-length");
final BodyPublisher bodyPublisher = toBodyPublisher(request.getBody(), contentLength);
return builder.method(request.getHttpMethod().toString(), bodyPublisher).build();
}
});
}
/**
* Create BodyPublisher from the given java.nio.ByteBuffer publisher.
*
* @param bbPublisher stream of java.nio.ByteBuffer representing request content
* @return the request BodyPublisher
*/
private static BodyPublisher toBodyPublisher(Flux<ByteBuffer> bbPublisher, String contentLength) {
if (bbPublisher == null) {
return noBody();
}
final Flow.Publisher<ByteBuffer> bbFlowPublisher = JdkFlowAdapter.publisherToFlowPublisher(bbPublisher);
if (CoreUtils.isNullOrEmpty(contentLength)) {
return fromPublisher(bbFlowPublisher);
} else {
long contentLengthLong = Long.parseLong(contentLength);
if (contentLengthLong < 1) {
return fromPublisher(bbFlowPublisher);
} else {
return fromPublisher(bbFlowPublisher, contentLengthLong);
}
}
}
/**
* Get the java runtime major version.
*
* @return the java major version
*/
private int getJavaVersion() {
String version = System.getProperty("java.version");
if (CoreUtils.isNullOrEmpty(version)) {
throw logger.logExceptionAsError(new RuntimeException("Can't find 'java.version' system property."));
}
if (version.startsWith("1.")) {
if (version.length() < 3) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version));
}
try {
return Integer.parseInt(version.substring(2, 3));
} catch (Throwable t) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t));
}
} else {
int idx = version.indexOf(".");
if (idx == -1) {
return Integer.parseInt(version);
}
try {
return Integer.parseInt(version.substring(0, idx));
} catch (Throwable t) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t));
}
}
}
/**
* Converts the given JDK Http headers to azure-core Http header.
*
* @param headers the JDK Http headers
* @return the azure-core Http headers
*/
static HttpHeaders fromJdkHttpHeaders(java.net.http.HttpHeaders headers) {
final HttpHeaders httpHeaders = new HttpHeaders();
for (final String key : headers.map().keySet()) {
final List<String> values = headers.allValues(key);
if (CoreUtils.isNullOrEmpty(values)) {
continue;
}
httpHeaders.put(key, values.size() == 1 ? values.get(0) : String.join(",", values));
}
return httpHeaders;
}
} | class JdkAsyncHttpClient implements HttpClient {
private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClient.class);
private final java.net.http.HttpClient jdkHttpClient;
private final Set<String> restrictedHeaders;
JdkAsyncHttpClient(java.net.http.HttpClient httpClient, Set<String> restrictedHeaders) {
this.jdkHttpClient = httpClient;
int javaVersion = getJavaVersion();
if (javaVersion <= 11) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("JdkAsyncHttpClient is not supported in Java version 11 and below."));
}
this.restrictedHeaders = restrictedHeaders;
logger.verbose("Effective restricted headers: {}", restrictedHeaders);
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
/**
* Converts the given azure-core request to the JDK HttpRequest type.
*
* @param request the azure-core request
* @return the Mono emitting HttpRequest
*/
private Mono<java.net.http.HttpRequest> toJdkHttpRequest(HttpRequest request) {
return Mono.fromCallable(() -> {
final java.net.http.HttpRequest.Builder builder = java.net.http.HttpRequest.newBuilder();
try {
builder.uri(request.getUrl().toURI());
} catch (URISyntaxException e) {
throw logger.logExceptionAsError(Exceptions.propagate(e));
}
final HttpHeaders headers = request.getHeaders();
if (headers != null) {
for (HttpHeader header : headers) {
final String headerName = header.getName();
if (!restrictedHeaders.contains(headerName)) {
final String headerValue = header.getValue();
builder.setHeader(headerName, headerValue);
} else {
logger.warning("The header '" + headerName + "' is restricted by default in JDK HttpClient 12 "
+ "and above. This header can be added to allow list in JAVA_HOME/conf/net.properties "
+ "or in System.setProperty() or in Configuration. Use the key 'jdk.httpclient"
+ ".allowRestrictedHeaders' and a comma separated list of header names.");
}
}
}
switch (request.getHttpMethod()) {
case GET:
return builder.GET().build();
case HEAD:
return builder.method("HEAD", noBody()).build();
default:
final String contentLength = request.getHeaders().getValue("content-length");
final BodyPublisher bodyPublisher = toBodyPublisher(request.getBody(), contentLength);
return builder.method(request.getHttpMethod().toString(), bodyPublisher).build();
}
});
}
/**
* Create BodyPublisher from the given java.nio.ByteBuffer publisher.
*
* @param bbPublisher stream of java.nio.ByteBuffer representing request content
* @return the request BodyPublisher
*/
private static BodyPublisher toBodyPublisher(Flux<ByteBuffer> bbPublisher, String contentLength) {
if (bbPublisher == null) {
return noBody();
}
final Flow.Publisher<ByteBuffer> bbFlowPublisher = JdkFlowAdapter.publisherToFlowPublisher(bbPublisher);
if (CoreUtils.isNullOrEmpty(contentLength)) {
return fromPublisher(bbFlowPublisher);
} else {
long contentLengthLong = Long.parseLong(contentLength);
if (contentLengthLong < 1) {
return fromPublisher(bbFlowPublisher);
} else {
return fromPublisher(bbFlowPublisher, contentLengthLong);
}
}
}
/**
* Get the java runtime major version.
*
* @return the java major version
*/
private int getJavaVersion() {
String version = System.getProperty("java.version");
if (CoreUtils.isNullOrEmpty(version)) {
throw logger.logExceptionAsError(new RuntimeException("Can't find 'java.version' system property."));
}
if (version.startsWith("1.")) {
if (version.length() < 3) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version));
}
try {
return Integer.parseInt(version.substring(2, 3));
} catch (Throwable t) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t));
}
} else {
int idx = version.indexOf(".");
if (idx == -1) {
return Integer.parseInt(version);
}
try {
return Integer.parseInt(version.substring(0, idx));
} catch (Throwable t) {
throw logger.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t));
}
}
}
/**
* Converts the given JDK Http headers to azure-core Http header.
*
* @param headers the JDK Http headers
* @return the azure-core Http headers
*/
static HttpHeaders fromJdkHttpHeaders(java.net.http.HttpHeaders headers) {
final HttpHeaders httpHeaders = new HttpHeaders();
for (final String key : headers.map().keySet()) {
final List<String> values = headers.allValues(key);
if (CoreUtils.isNullOrEmpty(values)) {
continue;
}
httpHeaders.put(key, values.size() == 1 ? values.get(0) : String.join(",", values));
}
return httpHeaders;
}
} |
Yeah, this caused an exception in `GeoArrayTests` - NPE was thrown instead of IndexOutOfBoundsException. | private static String sanitizeLogMessageInput(String logMessage) {
if (CoreUtils.isNullOrEmpty(logMessage)) {
return logMessage;
}
return CRLF_PATTERN.matcher(logMessage).replaceAll("");
} | if (CoreUtils.isNullOrEmpty(logMessage)) { | private static String sanitizeLogMessageInput(String logMessage) {
if (CoreUtils.isNullOrEmpty(logMessage)) {
return logMessage;
}
return CRLF_PATTERN.matcher(logMessage).replaceAll("");
} | class name using the {@link LoggerFactory} | class name using the {@link LoggerFactory} |
```suggestion // BEGIN: com.azure.storage.blob.specialized.BlobAsyncClientBase.getContainerAsyncClient ``` | public void getContainerClient() {
BlobContainerAsyncClient containerClient = client.getContainerAsyncClient();
System.out.println("The name of the container is " + containerClient.getBlobContainerName());
} | public void getContainerClient() {
BlobContainerAsyncClient containerClient = client.getContainerAsyncClient();
System.out.println("The name of the container is " + containerClient.getBlobContainerName());
} | class BlobAsyncClientJavaDocCodeSnippets {
private BlobAsyncClient client = JavaDocCodeSnippetsHelpers.getBlobAsyncClient("blobName");
private Flux<ByteBuffer> data = Flux.just(ByteBuffer.wrap("data".getBytes(StandardCharsets.UTF_8)));
private String leaseId = "leaseId";
private String copyId = "copyId";
private String url = "https:
private String file = "file";
private long blockSize = 50;
private int maxConcurrency = 2;
private String filePath = "filePath";
private UserDelegationKey userDelegationKey = JavaDocCodeSnippetsHelpers.getUserDelegationKey();
/**
* Code snippet for {@link BlobAsyncClient
*/
public void existsCodeSnippet() {
client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void abortCopyFromUrlCodeSnippet() {
client.abortCopyFromUrl(copyId).doOnSuccess(response -> System.out.println("Aborted copy from URL"));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void copyFromUrlCodeSnippet() {
client.copyFromUrl(url).subscribe(response -> System.out.printf("Copy identifier: %s%n", response));
}
/**
* Code snippets for {@link BlobAsyncClient
*
* @throws UncheckedIOException If an I/O error occurs
*/
public void downloadCodeSnippet() {
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
client.download().subscribe(piece -> {
try {
downloadData.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
}
/**
* Code snippets for {@link BlobAsyncClient
* BlobRequestConditions, boolean)}
*
* @throws UncheckedIOException If an I/O error occurs
*/
public void downloadWithResponseCodeSnippet() {
BlobRange range = new BlobRange(1024, 2048L);
DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
client.downloadWithResponse(range, options, null, false).subscribe(response -> {
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
response.getValue().subscribe(piece -> {
try {
downloadData.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
});
}
/**
* Code snippets for {@link BlobAsyncClient
* String, BlobRange, ParallelTransferOptions, DownloadRetryOptions, BlobRequestConditions, boolean)}
*/
public void downloadToFileCodeSnippet() {
client.downloadToFile(file).subscribe(response -> System.out.println("Completed download to file"));
BlobRange range = new BlobRange(1024, 2048L);
DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
client.downloadToFileWithResponse(file, range, null, options, null, false)
.subscribe(response -> System.out.println("Completed download to file"));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void deleteCodeSnippet() {
client.delete().doOnSuccess(response -> System.out.println("Completed delete"));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void getPropertiesCodeSnippet() {
client.getProperties().subscribe(response ->
System.out.printf("Type: %s, Size: %d%n", response.getBlobType(), response.getBlobSize()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void setHTTPHeadersCodeSnippet() {
client.setHttpHeaders(new BlobHttpHeaders()
.setContentLanguage("en-US")
.setContentType("binary"));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void setMetadataCodeSnippet() {
client.setMetadata(Collections.singletonMap("metadata", "value"));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void createSnapshotCodeSnippet() {
client.createSnapshot()
.subscribe(response -> System.out.printf("Identifier for the snapshot is %s%n",
response.getSnapshotId()));
}
/**
* Code snippets for {@link BlobAsyncClientBase
*/
public void setTierCodeSnippet() {
client.setAccessTier(AccessTier.HOT);
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void undeleteCodeSnippet() {
client.undelete().doOnSuccess(response -> System.out.println("Completed undelete"));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void getAccountInfoCodeSnippet() {
client.getAccountInfo().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n",
response.getAccountKind(), response.getSkuName()));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void existsWithResponseCodeSnippet() {
client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.getValue()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void abortCopyFromUrlWithResponseCodeSnippets() {
client.abortCopyFromUrlWithResponse(copyId, leaseId)
.subscribe(
response -> System.out.printf("Aborted copy completed with status %d%n", response.getStatusCode()));
}
/**
* Code snippets for {@link BlobAsyncClient
* RequestConditions, BlobRequestConditions)}
*/
public void copyFromUrlWithResponseCodeSnippets() {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
RequestConditions modifiedRequestConditions = new RequestConditions()
.setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
client.copyFromUrlWithResponse(url, metadata, AccessTier.HOT, modifiedRequestConditions, blobRequestConditions)
.subscribe(response -> System.out.printf("Copy identifier: %s%n", response));
}
/**
* Code snippets for {@link BlobAsyncClient
* BlobRequestConditions, boolean)}
*
* @throws UncheckedIOException If an I/O error occurs
*/
public void downloadWithResponseCodeSnippets() {
BlobRange range = new BlobRange(1024, (long) 2048);
DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
client.downloadWithResponse(range, options, null, false).subscribe(response -> {
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
response.getValue().subscribe(piece -> {
try {
downloadData.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
});
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void deleteWithResponseCodeSnippets() {
client.deleteWithResponse(DeleteSnapshotsOptionType.INCLUDE, null)
.subscribe(response -> System.out.printf("Delete completed with status %d%n", response.getStatusCode()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void getPropertiesWithResponseCodeSnippets() {
BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
client.getPropertiesWithResponse(requestConditions).subscribe(
response -> System.out.printf("Type: %s, Size: %d%n", response.getValue().getBlobType(),
response.getValue().getBlobSize()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void setHTTPHeadersWithResponseCodeSnippets() {
BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
client.setHttpHeadersWithResponse(new BlobHttpHeaders()
.setContentLanguage("en-US")
.setContentType("binary"), requestConditions).subscribe(
response ->
System.out.printf("Set HTTP headers completed with status %d%n",
response.getStatusCode()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void setMetadataWithResponseCodeSnippets() {
BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
client.setMetadataWithResponse(Collections.singletonMap("metadata", "value"), requestConditions)
.subscribe(
response -> System.out.printf("Set metadata completed with status %d%n", response.getStatusCode()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void createSnapshotWithResponseCodeSnippets() {
Map<String, String> snapshotMetadata = Collections.singletonMap("metadata", "value");
BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
client.createSnapshotWithResponse(snapshotMetadata, requestConditions)
.subscribe(response -> System.out.printf("Identifier for the snapshot is %s%n", response.getValue()));
}
/**
* Code snippets for {@link BlobAsyncClientBase
*/
public void setTierWithResponseCodeSnippets() {
client.setAccessTierWithResponse(AccessTier.HOT, RehydratePriority.STANDARD, leaseId)
.subscribe(response -> System.out.printf("Set tier completed with status code %d%n",
response.getStatusCode()));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void undeleteWithResponseCodeSnippets() {
client.undeleteWithResponse()
.subscribe(response -> System.out.printf("Undelete completed with status %d%n", response.getStatusCode()));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void getAccountInfoWithResponseCodeSnippets() {
client.getAccountInfoWithResponse().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n",
response.getValue().getAccountKind(), response.getValue().getSkuName()));
}
/**
* Generates a code sample for using {@link BlobAsyncClient
*/
public void getContainerName() {
String containerName = client.getContainerName();
System.out.println("The name of the container is " + containerName);
}
/**
* Generates a code sample for using {@link BlobAsyncClient
*/
/**
* Generates a code sample for using {@link BlobAsyncClient
*/
public void getBlobName() {
String blobName = client.getBlobName();
System.out.println("The name of the blob is " + blobName);
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void upload3() {
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(blockSize)
.setMaxConcurrency(maxConcurrency);
client.upload(data, parallelTransferOptions).subscribe(response ->
System.out.printf("Uploaded BlockBlob MD5 is %s%n",
Base64.getEncoder().encodeToString(response.getContentMd5())));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void uploadOverwrite() {
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(blockSize)
.setMaxConcurrency(maxConcurrency);
boolean overwrite = false;
client.upload(data, parallelTransferOptions, overwrite).subscribe(response ->
System.out.printf("Uploaded BlockBlob MD5 is %s%n",
Base64.getEncoder().encodeToString(response.getContentMd5())));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void upload4() {
BlobHttpHeaders headers = new BlobHttpHeaders()
.setContentMd5("data".getBytes(StandardCharsets.UTF_8))
.setContentLanguage("en-US")
.setContentType("binary");
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
BlobRequestConditions requestConditions = new BlobRequestConditions()
.setLeaseId(leaseId)
.setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(blockSize)
.setMaxConcurrency(maxConcurrency);
client.uploadWithResponse(data, parallelTransferOptions, headers, metadata, AccessTier.HOT, requestConditions)
.subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void upload5() {
BlobHttpHeaders headers = new BlobHttpHeaders()
.setContentMd5("data".getBytes(StandardCharsets.UTF_8))
.setContentLanguage("en-US")
.setContentType("binary");
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
BlobRequestConditions requestConditions = new BlobRequestConditions()
.setLeaseId(leaseId)
.setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(blockSize)
.setMaxConcurrency(maxConcurrency)
.setProgressReceiver(bytesTransferred -> System.out.printf("Upload progress: %s bytes sent", bytesTransferred));
client.uploadWithResponse(data, parallelTransferOptions, headers, metadata, AccessTier.HOT, requestConditions)
.subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void upload6() {
BlobHttpHeaders headers = new BlobHttpHeaders()
.setContentMd5("data".getBytes(StandardCharsets.UTF_8))
.setContentLanguage("en-US")
.setContentType("binary");
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
Map<String, String> tags = Collections.singletonMap("tag", "value");
BlobRequestConditions requestConditions = new BlobRequestConditions()
.setLeaseId(leaseId)
.setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(maxConcurrency).setProgressReceiver(bytesTransferred ->
System.out.printf("Upload progress: %s bytes sent", bytesTransferred));
client.uploadWithResponse(new BlobParallelUploadOptions(data)
.setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata).setTags(tags)
.setTier(AccessTier.HOT).setRequestConditions(requestConditions))
.subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void uploadFromFile() {
client.uploadFromFile(filePath)
.doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
.subscribe(completion -> System.out.println("Upload from file succeeded"));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void uploadFromFileOverwrite() {
boolean overwrite = false;
client.uploadFromFile(filePath, overwrite)
.doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
.subscribe(completion -> System.out.println("Upload from file succeeded"));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void uploadFromFile2() {
BlobHttpHeaders headers = new BlobHttpHeaders()
.setContentMd5("data".getBytes(StandardCharsets.UTF_8))
.setContentLanguage("en-US")
.setContentType("binary");
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
BlobRequestConditions requestConditions = new BlobRequestConditions()
.setLeaseId(leaseId)
.setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
client.uploadFromFile(filePath,
new ParallelTransferOptions().setBlockSizeLong(BlockBlobClient.MAX_STAGE_BLOCK_BYTES_LONG),
headers, metadata, AccessTier.HOT, requestConditions)
.doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
.subscribe(completion -> System.out.println("Upload from file succeeded"));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void uploadFromFile3() {
BlobHttpHeaders headers = new BlobHttpHeaders()
.setContentMd5("data".getBytes(StandardCharsets.UTF_8))
.setContentLanguage("en-US")
.setContentType("binary");
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
Map<String, String> tags = Collections.singletonMap("tag", "value");
BlobRequestConditions requestConditions = new BlobRequestConditions()
.setLeaseId(leaseId)
.setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
client.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath)
.setParallelTransferOptions(
new ParallelTransferOptions().setBlockSizeLong(BlobAsyncClient.BLOB_MAX_UPLOAD_BLOCK_SIZE))
.setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT)
.setRequestConditions(requestConditions))
.doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
.subscribe(completion -> System.out.println("Upload from file succeeded"));
}
} | class BlobAsyncClientJavaDocCodeSnippets {
private BlobAsyncClient client = JavaDocCodeSnippetsHelpers.getBlobAsyncClient("blobName");
private Flux<ByteBuffer> data = Flux.just(ByteBuffer.wrap("data".getBytes(StandardCharsets.UTF_8)));
private String leaseId = "leaseId";
private String copyId = "copyId";
private String url = "https:
private String file = "file";
private long blockSize = 50;
private int maxConcurrency = 2;
private String filePath = "filePath";
private UserDelegationKey userDelegationKey = JavaDocCodeSnippetsHelpers.getUserDelegationKey();
/**
* Code snippet for {@link BlobAsyncClient
*/
public void existsCodeSnippet() {
client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void abortCopyFromUrlCodeSnippet() {
client.abortCopyFromUrl(copyId).doOnSuccess(response -> System.out.println("Aborted copy from URL"));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void copyFromUrlCodeSnippet() {
client.copyFromUrl(url).subscribe(response -> System.out.printf("Copy identifier: %s%n", response));
}
/**
* Code snippets for {@link BlobAsyncClient
*
* @throws UncheckedIOException If an I/O error occurs
*/
public void downloadCodeSnippet() {
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
client.download().subscribe(piece -> {
try {
downloadData.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
}
/**
* Code snippets for {@link BlobAsyncClient
* BlobRequestConditions, boolean)}
*
* @throws UncheckedIOException If an I/O error occurs
*/
public void downloadWithResponseCodeSnippet() {
BlobRange range = new BlobRange(1024, 2048L);
DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
client.downloadWithResponse(range, options, null, false).subscribe(response -> {
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
response.getValue().subscribe(piece -> {
try {
downloadData.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
});
}
/**
* Code snippets for {@link BlobAsyncClient
* String, BlobRange, ParallelTransferOptions, DownloadRetryOptions, BlobRequestConditions, boolean)}
*/
public void downloadToFileCodeSnippet() {
client.downloadToFile(file).subscribe(response -> System.out.println("Completed download to file"));
BlobRange range = new BlobRange(1024, 2048L);
DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
client.downloadToFileWithResponse(file, range, null, options, null, false)
.subscribe(response -> System.out.println("Completed download to file"));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void deleteCodeSnippet() {
client.delete().doOnSuccess(response -> System.out.println("Completed delete"));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void getPropertiesCodeSnippet() {
client.getProperties().subscribe(response ->
System.out.printf("Type: %s, Size: %d%n", response.getBlobType(), response.getBlobSize()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void setHTTPHeadersCodeSnippet() {
client.setHttpHeaders(new BlobHttpHeaders()
.setContentLanguage("en-US")
.setContentType("binary"));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void setMetadataCodeSnippet() {
client.setMetadata(Collections.singletonMap("metadata", "value"));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void createSnapshotCodeSnippet() {
client.createSnapshot()
.subscribe(response -> System.out.printf("Identifier for the snapshot is %s%n",
response.getSnapshotId()));
}
/**
* Code snippets for {@link BlobAsyncClientBase
*/
public void setTierCodeSnippet() {
client.setAccessTier(AccessTier.HOT);
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void undeleteCodeSnippet() {
client.undelete().doOnSuccess(response -> System.out.println("Completed undelete"));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void getAccountInfoCodeSnippet() {
client.getAccountInfo().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n",
response.getAccountKind(), response.getSkuName()));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void existsWithResponseCodeSnippet() {
client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.getValue()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void abortCopyFromUrlWithResponseCodeSnippets() {
client.abortCopyFromUrlWithResponse(copyId, leaseId)
.subscribe(
response -> System.out.printf("Aborted copy completed with status %d%n", response.getStatusCode()));
}
/**
* Code snippets for {@link BlobAsyncClient
* RequestConditions, BlobRequestConditions)}
*/
public void copyFromUrlWithResponseCodeSnippets() {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
RequestConditions modifiedRequestConditions = new RequestConditions()
.setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
client.copyFromUrlWithResponse(url, metadata, AccessTier.HOT, modifiedRequestConditions, blobRequestConditions)
.subscribe(response -> System.out.printf("Copy identifier: %s%n", response));
}
/**
* Code snippets for {@link BlobAsyncClient
* BlobRequestConditions, boolean)}
*
* @throws UncheckedIOException If an I/O error occurs
*/
public void downloadWithResponseCodeSnippets() {
BlobRange range = new BlobRange(1024, (long) 2048);
DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
client.downloadWithResponse(range, options, null, false).subscribe(response -> {
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
response.getValue().subscribe(piece -> {
try {
downloadData.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
});
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void deleteWithResponseCodeSnippets() {
client.deleteWithResponse(DeleteSnapshotsOptionType.INCLUDE, null)
.subscribe(response -> System.out.printf("Delete completed with status %d%n", response.getStatusCode()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void getPropertiesWithResponseCodeSnippets() {
BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
client.getPropertiesWithResponse(requestConditions).subscribe(
response -> System.out.printf("Type: %s, Size: %d%n", response.getValue().getBlobType(),
response.getValue().getBlobSize()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void setHTTPHeadersWithResponseCodeSnippets() {
BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
client.setHttpHeadersWithResponse(new BlobHttpHeaders()
.setContentLanguage("en-US")
.setContentType("binary"), requestConditions).subscribe(
response ->
System.out.printf("Set HTTP headers completed with status %d%n",
response.getStatusCode()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void setMetadataWithResponseCodeSnippets() {
BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
client.setMetadataWithResponse(Collections.singletonMap("metadata", "value"), requestConditions)
.subscribe(
response -> System.out.printf("Set metadata completed with status %d%n", response.getStatusCode()));
}
/**
* Code snippets for {@link BlobAsyncClient
*/
public void createSnapshotWithResponseCodeSnippets() {
Map<String, String> snapshotMetadata = Collections.singletonMap("metadata", "value");
BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
client.createSnapshotWithResponse(snapshotMetadata, requestConditions)
.subscribe(response -> System.out.printf("Identifier for the snapshot is %s%n", response.getValue()));
}
/**
* Code snippets for {@link BlobAsyncClientBase
*/
public void setTierWithResponseCodeSnippets() {
client.setAccessTierWithResponse(AccessTier.HOT, RehydratePriority.STANDARD, leaseId)
.subscribe(response -> System.out.printf("Set tier completed with status code %d%n",
response.getStatusCode()));
}
/**
* Code snippet for {@link BlobAsyncClient
*/
public void undeleteWithResponseCodeSnippets() {
client.undeleteWithResponse()
.subscribe(response -> System.out.printf("Undelete completed with status %d%n", response.getStatusCode()));
}
/**
 * Code snippet for {@link BlobAsyncClient} getAccountInfoWithResponse.
 */
public void getAccountInfoWithResponseCodeSnippets() {
    client.getAccountInfoWithResponse().subscribe(response -> {
        // The value carries the storage account kind and SKU.
        System.out.printf("Account Kind: %s, SKU: %s%n",
            response.getValue().getAccountKind(), response.getValue().getSkuName());
    });
}
/**
 * Generates a code sample for using {@link BlobAsyncClient} getContainerName.
 */
public void getContainerName() {
    // Reads the container name this client is bound to; no network call involved here.
    System.out.println("The name of the container is " + client.getContainerName());
}
/**
 * Generates a code sample for using {@link BlobAsyncClient} getBlobName.
 */
public void getBlobName() {
    // Reads the blob name this client is bound to.
    String blobName = client.getBlobName();
    System.out.println("The name of the blob is " + blobName);
}
/**
 * Code snippet for {@link BlobAsyncClient} upload with {@code ParallelTransferOptions}.
 */
public void upload3() {
    ParallelTransferOptions options = new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxConcurrency(maxConcurrency);
    client.upload(data, options).subscribe(response -> {
        String md5 = Base64.getEncoder().encodeToString(response.getContentMd5());
        System.out.printf("Uploaded BlockBlob MD5 is %s%n", md5);
    });
}
/**
 * Code snippet for {@link BlobAsyncClient} upload with an explicit overwrite flag.
 */
public void uploadOverwrite() {
    // Explicitly disallow replacing an existing blob.
    boolean overwrite = false;
    ParallelTransferOptions options = new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxConcurrency(maxConcurrency);
    client.upload(data, options, overwrite).subscribe(response -> {
        String md5 = Base64.getEncoder().encodeToString(response.getContentMd5());
        System.out.printf("Uploaded BlockBlob MD5 is %s%n", md5);
    });
}
/**
 * Code snippet for {@link BlobAsyncClient} uploadWithResponse (positional overload).
 */
public void upload4() {
    BlobHttpHeaders headers = new BlobHttpHeaders()
        .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
        .setContentLanguage("en-US")
        .setContentType("binary");
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    // Only succeed when the lease matches and the blob is unchanged over the last three days.
    BlobRequestConditions requestConditions = new BlobRequestConditions()
        .setLeaseId(leaseId)
        .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxConcurrency(maxConcurrency);
    client.uploadWithResponse(data, parallelTransferOptions, headers, metadata, AccessTier.HOT, requestConditions)
        .subscribe(response -> {
            String md5 = Base64.getEncoder().encodeToString(response.getValue().getContentMd5());
            System.out.printf("Uploaded BlockBlob MD5 is %s%n", md5);
        });
}
/**
 * Code snippet for {@link BlobAsyncClient} uploadWithResponse with a progress receiver.
 */
public void upload5() {
    BlobHttpHeaders headers = new BlobHttpHeaders()
        .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
        .setContentLanguage("en-US")
        .setContentType("binary");
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    BlobRequestConditions requestConditions = new BlobRequestConditions()
        .setLeaseId(leaseId)
        .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
    // The progress receiver is invoked as bytes are sent.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxConcurrency(maxConcurrency)
        .setProgressReceiver(bytesTransferred ->
            System.out.printf("Upload progress: %s bytes sent", bytesTransferred));
    client.uploadWithResponse(data, parallelTransferOptions, headers, metadata, AccessTier.HOT, requestConditions)
        .subscribe(response -> {
            String md5 = Base64.getEncoder().encodeToString(response.getValue().getContentMd5());
            System.out.printf("Uploaded BlockBlob MD5 is %s%n", md5);
        });
}
/**
 * Code snippet for {@link BlobAsyncClient} uploadWithResponse using {@code BlobParallelUploadOptions}.
 */
public void upload6() {
    BlobHttpHeaders headers = new BlobHttpHeaders()
        .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
        .setContentLanguage("en-US")
        .setContentType("binary");
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    Map<String, String> tags = Collections.singletonMap("tag", "value");
    BlobRequestConditions requestConditions = new BlobRequestConditions()
        .setLeaseId(leaseId)
        .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxConcurrency(maxConcurrency)
        .setProgressReceiver(bytesTransferred ->
            System.out.printf("Upload progress: %s bytes sent", bytesTransferred));
    // Bundle everything into one options object instead of the long positional overload.
    BlobParallelUploadOptions options = new BlobParallelUploadOptions(data)
        .setParallelTransferOptions(parallelTransferOptions)
        .setHeaders(headers)
        .setMetadata(metadata)
        .setTags(tags)
        .setTier(AccessTier.HOT)
        .setRequestConditions(requestConditions);
    client.uploadWithResponse(options)
        .subscribe(response -> {
            String md5 = Base64.getEncoder().encodeToString(response.getValue().getContentMd5());
            System.out.printf("Uploaded BlockBlob MD5 is %s%n", md5);
        });
}
/**
 * Code snippet for {@link BlobAsyncClient} uploadFromFile.
 */
public void uploadFromFile() {
    // uploadFromFile returns Mono<Void>, which completes without emitting a value, so the
    // original onNext-only subscribe never printed the success message. The success message
    // belongs in the completion callback, and passing the error consumer to subscribe (instead
    // of doOnError alone) keeps failures from reaching Reactor's onErrorDropped hook.
    client.uploadFromFile(filePath)
        .subscribe(unused -> { },
            throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()),
            () -> System.out.println("Upload from file succeeded"));
}
/**
 * Code snippet for {@link BlobAsyncClient} uploadFromFile with an explicit overwrite flag.
 */
public void uploadFromFileOverwrite() {
    // Explicitly disallow replacing an existing blob.
    boolean overwrite = false;
    // uploadFromFile returns Mono<Void>: the success message must come from the completion
    // callback (the onNext consumer never fires for an empty Mono), and the error consumer is
    // registered on subscribe so the failure is not dropped.
    client.uploadFromFile(filePath, overwrite)
        .subscribe(unused -> { },
            throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()),
            () -> System.out.println("Upload from file succeeded"));
}
/**
 * Code snippet for {@link BlobAsyncClient} uploadFromFile with headers, metadata, tier and
 * request conditions.
 */
public void uploadFromFile2() {
    BlobHttpHeaders headers = new BlobHttpHeaders()
        .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
        .setContentLanguage("en-US")
        .setContentType("binary");
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    BlobRequestConditions requestConditions = new BlobRequestConditions()
        .setLeaseId(leaseId)
        .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
    // uploadFromFile returns Mono<Void>: print success from the completion callback (onNext
    // never fires for an empty Mono) and register the error consumer on subscribe so failures
    // are handled instead of reaching onErrorDropped.
    client.uploadFromFile(filePath,
        new ParallelTransferOptions().setBlockSizeLong(BlockBlobClient.MAX_STAGE_BLOCK_BYTES_LONG),
        headers, metadata, AccessTier.HOT, requestConditions)
        .subscribe(unused -> { },
            throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()),
            () -> System.out.println("Upload from file succeeded"));
}
/**
 * Code snippet for {@link BlobAsyncClient} uploadFromFileWithResponse.
 */
public void uploadFromFile3() {
    BlobHttpHeaders headers = new BlobHttpHeaders()
        .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
        .setContentLanguage("en-US")
        .setContentType("binary");
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    Map<String, String> tags = Collections.singletonMap("tag", "value");
    BlobRequestConditions requestConditions = new BlobRequestConditions()
        .setLeaseId(leaseId)
        .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
    BlobUploadFromFileOptions options = new BlobUploadFromFileOptions(filePath)
        .setParallelTransferOptions(
            new ParallelTransferOptions().setBlockSizeLong(BlobAsyncClient.BLOB_MAX_UPLOAD_BLOCK_SIZE))
        .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT)
        .setRequestConditions(requestConditions);
    // Handle the error in subscribe's error consumer; doOnError alone left the failure to hit
    // Reactor's onErrorDropped hook because no error consumer was registered.
    client.uploadFromFileWithResponse(options)
        .subscribe(response -> System.out.println("Upload from file succeeded"),
            throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()));
}
} | |
Do you need to create the `authPolicy` variable? Can we just return directly? | private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
// Select the authentication policy from whichever credential was configured on the builder;
// the AAD token credential takes precedence over the HMAC access-key credential.
HttpPipelinePolicy authPolicy;
if (this.tokenCredential != null) {
    // NOTE(review): the scope string below looks truncated by extraction ("https:" …) —
    // TODO confirm the full AAD scope URL against the original source.
    authPolicy = new BearerTokenAuthenticationPolicy(
        this.tokenCredential, "https:
} else if (this.credential != null) {
    // Shared-key (HMAC) request signing.
    authPolicy = new HmacAuthenticationPolicy(this.credential);
} else {
    // Neither credential type was supplied; the builder cannot authenticate requests.
    throw logger.logExceptionAsError(
        new IllegalArgumentException("Missing credential information while building a client."));
}
return authPolicy;
} | HttpPipelinePolicy authPolicy; | private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
// Reject ambiguous configuration up front: exactly one credential type may be supplied.
if (this.tokenCredential != null && this.accessKeyCredential != null) {
    throw logger.logExceptionAsError(
        new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
    // NOTE(review): the scope string below looks truncated by extraction ("https:" …) —
    // TODO confirm the full AAD scope URL against the original source.
    return new BearerTokenAuthenticationPolicy(
        this.tokenCredential, "https:
} else if (this.accessKeyCredential != null) {
    // Shared-key (HMAC) request signing.
    return new HmacAuthenticationPolicy(this.accessKeyCredential);
} else {
    // No credential was supplied; the builder cannot authenticate requests.
    throw logger.logExceptionAsError(
        new IllegalArgumentException("Missing credential information while building a client."));
}
} | class CommunicationIdentityClientBuilder {
// Keys looked up in the properties resource below to build the User-Agent header.
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
// Resource bundle carrying the SDK name/version values.
private static final String COMMUNICATION_ADMINISTRATION_PROPERTIES =
    "azure-communication-administration.properties";
private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class);
// Service endpoint URL; required before building a client.
private String endpoint;
// HMAC access-key credential, set via accessKey()/connectionString().
private CommunicationClientCredential credential;
// AAD token credential, set via credential(TokenCredential); checked before the HMAC credential.
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
// When supplied, this pipeline is used verbatim and no pipeline is assembled from the other fields.
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_ADMINISTRATION_PROPERTIES);
// Extra policies appended after the built-in auth/user-agent/retry/cookie policies.
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
 * Sets the service endpoint.
 *
 * @param endpoint url of the service
 * @return CommunicationIdentityClientBuilder
 */
public CommunicationIdentityClientBuilder endpoint(String endpoint) {
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    this.endpoint = endpoint;
    return this;
}
/**
 * Sets the {@link HttpPipeline} to use. If no pipeline is supplied, one is assembled from the
 * credential and httpClient fields at build time.
 *
 * @param pipeline HttpPipeline to use
 * @return CommunicationIdentityClientBuilder
 */
public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) {
    Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
    this.pipeline = pipeline;
    return this;
}
/**
 * Sets the {@link TokenCredential} used to authenticate HTTP requests.
 *
 * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
 * @return The updated {@link CommunicationIdentityClientBuilder} object.
 * @throws NullPointerException If {@code tokenCredential} is null.
 */
public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) {
    Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
    this.tokenCredential = tokenCredential;
    return this;
}
/**
 * Sets the access key used to sign requests.
 *
 * @param accessKey access key for initializing CommunicationClientCredential
 * @return CommunicationIdentityClientBuilder
 * @throws NullPointerException If {@code accessKey} is null.
 */
public CommunicationIdentityClientBuilder accessKey(String accessKey) {
    Objects.requireNonNull(accessKey, "'accessKey' cannot be null.");
    // Wrap the raw key in the HMAC credential type consumed by the auth policy.
    this.credential = new CommunicationClientCredential(accessKey);
    return this;
}
/**
 * Sets both endpoint and access key from a single connection string.
 *
 * @param connectionString connection string for setting endpoint and initializing CommunicationClientCredential
 * @return CommunicationIdentityClientBuilder
 */
public CommunicationIdentityClientBuilder connectionString(String connectionString) {
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
    ConnectionString parsed = new ConnectionString(connectionString);
    // Delegate to the individual setters so their validation applies to both parts.
    return this.endpoint(parsed.getEndpoint()).accessKey(parsed.getAccessKey());
}
/**
 * Sets the {@link HttpClient} to use; ignored when an explicit pipeline is supplied.
 *
 * @param httpClient httpClient to use
 * @return CommunicationIdentityClientBuilder
 */
public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) {
    Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
    this.httpClient = httpClient;
    return this;
}
/**
 * Appends an additional {@link HttpPipelinePolicy}, applied after the built-in
 * auth, UserAgent, Retry and Cookie policies.
 *
 * @param customPolicy HttpPipelinePolicy object to append
 * @return CommunicationIdentityClientBuilder
 */
public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
    Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.");
    this.customPolicies.add(customPolicy);
    return this;
}
/**
 * Sets the configuration store used to retrieve environment configuration values during build.
 *
 * @param configuration Configuration store used to retrieve environment configurations.
 * @return the updated CommunicationIdentityClientBuilder object
 */
public CommunicationIdentityClientBuilder configuration(Configuration configuration) {
    Objects.requireNonNull(configuration, "'configuration' cannot be null.");
    this.configuration = configuration;
    return this;
}
/**
 * Sets the {@link HttpLogOptions} applied when sending and receiving HTTP requests/responses.
 *
 * @param logOptions The logging configuration to use.
 * @return the updated CommunicationIdentityClientBuilder object
 */
public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    this.httpLogOptions = logOptions;
    return this;
}
/**
 * Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests.
 * <p>
 * If a service version is not provided, the service version that will be used will be the latest known service
 * version based on the version of the client library being used. If no service version is specified, updating to a
 * newer version of the client library will have the result of potentially moving to a newer service version.
 * <p>
 * Targeting a specific service version may also mean that the service will return an error for newer APIs.
 *
 * @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests.
 * @return the updated CommunicationIdentityClientBuilder object
 */
public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) {
    // NOTE(review): 'version' is currently ignored — the latest known service version is always
    // used. Confirm this is intentional or persist the value for request construction.
    return this;
}
/**
 * Creates the asynchronous client. The pipeline applies HmacAuthenticationPolicy,
 * UserAgentPolicy, RetryPolicy and CookiePolicy, followed by any custom policies.
 *
 * @return CommunicationIdentityAsyncClient instance
 */
public CommunicationIdentityAsyncClient buildAsyncClient() {
    CommunicationIdentityClientImpl serviceImpl = createServiceImpl();
    return new CommunicationIdentityAsyncClient(serviceImpl);
}
/**
 * Creates the synchronous client, which wraps the asynchronous client built by
 * {@link #buildAsyncClient()}.
 *
 * @return CommunicationIdentityClient instance
 */
public CommunicationIdentityClient buildClient() {
    CommunicationIdentityAsyncClient asyncClient = buildAsyncClient();
    return new CommunicationIdentityClient(asyncClient);
}
// Builds the generated service implementation, assembling a pipeline only when the caller
// did not supply one explicitly.
private CommunicationIdentityClientImpl createServiceImpl() {
    Objects.requireNonNull(endpoint);
    HttpPipeline builderPipeline = (this.pipeline != null)
        ? this.pipeline
        : createHttpPipeline(httpClient, createHttpPipelineAuthPolicy(), customPolicies);
    return new CommunicationIdentityClientImplBuilder()
        .endpoint(endpoint)
        .pipeline(builderPipeline)
        .buildClient();
}
// Assembles the HTTP pipeline: auth first, then the required policies, then any custom ones.
private HttpPipeline createHttpPipeline(HttpClient httpClient,
    HttpPipelinePolicy authorizationPolicy,
    List<HttpPipelinePolicy> customPolicies) {
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(authorizationPolicy);
    applyRequiredPolicies(policies);
    if (customPolicies != null && !customPolicies.isEmpty()) {
        policies.addAll(customPolicies);
    }
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
// Appends the mandatory pipeline policies. Order matters: UserAgent, Retry, Cookie, Logging.
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
    // Resolve SDK name/version for the User-Agent header, falling back to placeholders.
    String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration));
    policies.add(new RetryPolicy());
    policies.add(new CookiePolicy());
    policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationIdentityClientBuilder {
// Keys looked up in the properties resource below to build the User-Agent header.
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
// Resource bundle carrying the SDK name/version values.
private static final String COMMUNICATION_ADMINISTRATION_PROPERTIES =
    "azure-communication-administration.properties";
private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class);
// Service endpoint URL; required before building a client.
private String endpoint;
// HMAC access-key credential, set via accessKey()/connectionString(); mutually exclusive with
// tokenCredential (createHttpPipelineAuthPolicy rejects setting both).
private CommunicationClientCredential accessKeyCredential;
// AAD token credential, set via credential(TokenCredential).
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
// When supplied, this pipeline is used verbatim and no pipeline is assembled from the other fields.
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_ADMINISTRATION_PROPERTIES);
// Extra policies appended after the built-in auth/user-agent/retry/cookie policies.
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
 * Sets the service endpoint.
 *
 * @param endpoint url of the service
 * @return CommunicationIdentityClientBuilder
 */
public CommunicationIdentityClientBuilder endpoint(String endpoint) {
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    this.endpoint = endpoint;
    return this;
}
/**
 * Sets the {@link HttpPipeline} to use. If no pipeline is supplied, one is assembled from the
 * credential and httpClient fields at build time.
 *
 * @param pipeline HttpPipeline to use
 * @return CommunicationIdentityClientBuilder
 */
public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) {
    Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
    this.pipeline = pipeline;
    return this;
}
/**
 * Sets the {@link TokenCredential} used to authenticate HTTP requests.
 *
 * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
 * @return The updated {@link CommunicationIdentityClientBuilder} object.
 * @throws NullPointerException If {@code tokenCredential} is null.
 */
public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) {
    Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
    this.tokenCredential = tokenCredential;
    return this;
}
/**
 * Sets the access key used to sign requests.
 *
 * @param accessKey access key for initializing CommunicationClientCredential
 * @return CommunicationIdentityClientBuilder
 * @throws NullPointerException If {@code accessKey} is null.
 */
public CommunicationIdentityClientBuilder accessKey(String accessKey) {
    Objects.requireNonNull(accessKey, "'accessKey' cannot be null.");
    // Wrap the raw key in the HMAC credential type consumed by the auth policy.
    this.accessKeyCredential = new CommunicationClientCredential(accessKey);
    return this;
}
/**
 * Sets both endpoint and access key from a single connection string.
 *
 * @param connectionString connection string for setting endpoint and initializing CommunicationClientCredential
 * @return CommunicationIdentityClientBuilder
 */
public CommunicationIdentityClientBuilder connectionString(String connectionString) {
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
    ConnectionString parsed = new ConnectionString(connectionString);
    // Delegate to the individual setters so their validation applies to both parts.
    return this.endpoint(parsed.getEndpoint()).accessKey(parsed.getAccessKey());
}
/**
 * Sets the {@link HttpClient} to use; ignored when an explicit pipeline is supplied.
 *
 * @param httpClient httpClient to use
 * @return CommunicationIdentityClientBuilder
 */
public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) {
    Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
    this.httpClient = httpClient;
    return this;
}
/**
 * Appends an additional {@link HttpPipelinePolicy}, applied after the built-in
 * auth, UserAgent, Retry and Cookie policies.
 *
 * @param customPolicy HttpPipelinePolicy object to append
 * @return CommunicationIdentityClientBuilder
 */
public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
    Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.");
    this.customPolicies.add(customPolicy);
    return this;
}
/**
 * Sets the configuration store used to retrieve environment configuration values during build.
 *
 * @param configuration Configuration store used to retrieve environment configurations.
 * @return the updated CommunicationIdentityClientBuilder object
 */
public CommunicationIdentityClientBuilder configuration(Configuration configuration) {
    Objects.requireNonNull(configuration, "'configuration' cannot be null.");
    this.configuration = configuration;
    return this;
}
/**
 * Sets the {@link HttpLogOptions} applied when sending and receiving HTTP requests/responses.
 *
 * @param logOptions The logging configuration to use.
 * @return the updated CommunicationIdentityClientBuilder object
 */
public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    this.httpLogOptions = logOptions;
    return this;
}
/**
 * Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests.
 * <p>
 * If a service version is not provided, the service version that will be used will be the latest known service
 * version based on the version of the client library being used. If no service version is specified, updating to a
 * newer version of the client library will have the result of potentially moving to a newer service version.
 * <p>
 * Targeting a specific service version may also mean that the service will return an error for newer APIs.
 *
 * @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests.
 * @return the updated CommunicationIdentityClientBuilder object
 */
public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) {
    // NOTE(review): 'version' is currently ignored — the latest known service version is always
    // used. Confirm this is intentional or persist the value for request construction.
    return this;
}
/**
 * Creates the asynchronous client. The pipeline applies HmacAuthenticationPolicy,
 * UserAgentPolicy, RetryPolicy and CookiePolicy, followed by any custom policies.
 *
 * @return CommunicationIdentityAsyncClient instance
 */
public CommunicationIdentityAsyncClient buildAsyncClient() {
    CommunicationIdentityClientImpl serviceImpl = createServiceImpl();
    return new CommunicationIdentityAsyncClient(serviceImpl);
}
/**
 * Creates the synchronous client, which wraps the asynchronous client built by
 * {@link #buildAsyncClient()}.
 *
 * @return CommunicationIdentityClient instance
 */
public CommunicationIdentityClient buildClient() {
    CommunicationIdentityAsyncClient asyncClient = buildAsyncClient();
    return new CommunicationIdentityClient(asyncClient);
}
// Builds the generated service implementation, assembling a pipeline only when the caller
// did not supply one explicitly.
private CommunicationIdentityClientImpl createServiceImpl() {
    Objects.requireNonNull(endpoint);
    HttpPipeline builderPipeline = (this.pipeline != null)
        ? this.pipeline
        : createHttpPipeline(httpClient, createHttpPipelineAuthPolicy(), customPolicies);
    return new CommunicationIdentityClientImplBuilder()
        .endpoint(endpoint)
        .pipeline(builderPipeline)
        .buildClient();
}
// Assembles the HTTP pipeline: auth first, then the required policies, then any custom ones.
private HttpPipeline createHttpPipeline(HttpClient httpClient,
    HttpPipelinePolicy authorizationPolicy,
    List<HttpPipelinePolicy> customPolicies) {
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(authorizationPolicy);
    applyRequiredPolicies(policies);
    if (customPolicies != null && !customPolicies.isEmpty()) {
        policies.addAll(customPolicies);
    }
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
// Appends the mandatory pipeline policies. Order matters: UserAgent, Retry, Cookie, Logging.
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
    // Resolve SDK name/version for the User-Agent header, falling back to placeholders.
    String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration));
    policies.add(new RetryPolicy());
    policies.add(new CookiePolicy());
    policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
Why are we using concatMap rather than flatMap? I'm confused by this chain. | void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
// Arrange: publish a single message to the entity under test.
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(OPERATION_TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
// Capture the transaction context created asynchronously so it can be rolled back below.
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
    .assertNext(txn -> {
        transaction.set(txn);
        assertNotNull(transaction);
    })
    .verifyComplete();
// Receive and settle the first message. concatMap settles each message before the next one is
// processed (sequential per Reactor's documented semantics); next() then limits verification to
// that single settled message.
StepVerifier.create(receiver.receiveMessages()
    .concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())
    .assertNext(receivedMessage -> {
        assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
        messagesPending.decrementAndGet();
    }).verifyComplete();
// The complete() above passed no transaction context, so this rollback only closes the txn.
StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
    .verifyComplete();
} | .concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next()) | void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
// Arrange: publish a single message to the entity under test.
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(OPERATION_TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
// Capture the transaction context created asynchronously so it can be rolled back below.
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
    .assertNext(txn -> {
        transaction.set(txn);
        assertNotNull(transaction);
    })
    .verifyComplete();
// flatMap settles messages without enforcing ordering (only one message is in flight here);
// take(1) bounds the stream so the verifier can complete.
StepVerifier.create(receiver.receiveMessages()
    .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
    .assertNext(receivedMessage -> {
        assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
        messagesPending.decrementAndGet();
    }).verifyComplete();
// The complete() above passed no transaction context, so this rollback only closes the txn.
StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
    .verifyComplete();
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
// Messages sent but not yet settled; tests decrement as they complete/dead-letter messages.
private final AtomicInteger messagesPending = new AtomicInteger();
// Field default for non-session runs; parameterized tests shadow this with a method parameter.
private final boolean isSessionEnabled = false;
// Clients are created per-test and disposed in afterTest().
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private ServiceBusSessionReceiverAsyncClient sessionReceiver;
// Hands a logger for this suite to the shared IntegrationTestBase.
ServiceBusReceiverAsyncClientIntegrationTest() {
    super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
    // Fresh session id per test so session-enabled runs never collide.
    sessionId = UUID.randomUUID().toString();
}
@Override
protected void afterTest() {
    // Drop the cached builder so the next test creates a fresh connection.
    sharedBuilder = null;
    try {
        dispose(receiver, sender, sessionReceiver);
    } catch (Exception e) {
        // Best-effort cleanup: draining may fail if the entity is already gone.
        logger.warning("Error occurred when draining queue.", e);
    }
}
/**
 * Verifies that we can create multiple transaction using sender and receiver.
 */
@Test
void createMultipleTransactionTest() {
    setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);
    // Two back-to-back createTransaction calls must each yield a non-null transaction context.
    StepVerifier.create(receiver.createTransaction())
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    StepVerifier.create(receiver.createTransaction())
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
}
/**
* Verifies that we can create transaction and complete.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
/**
 * Verifies that we can do the following using a shared connection on a non-session entity:
 * 1. create a transaction; 2. receive and settle with the transactionContext; 3. commit the
 * transaction.
 */
@ParameterizedTest
@EnumSource(DispositionStatus.class)
void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
    final MessagingEntityType entityType = MessagingEntityType.QUEUE;
    setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled);
    final String messageId1 = UUID.randomUUID().toString();
    final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
    final String deadLetterReason = "test reason";
    sendMessage(message1).block(TIMEOUT);
    // Capture the transaction created asynchronously so the settle/commit calls can use it.
    AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
    StepVerifier.create(receiver.createTransaction())
        .assertNext(txn -> {
            transaction.set(txn);
            assertNotNull(transaction);
        })
        .verifyComplete();
    assertNotNull(transaction.get());
    final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
    assertNotNull(receivedMessage);
    // Build (but do not yet subscribe) the settlement operation matching the parameterized disposition.
    final Mono<Void> operation;
    switch (dispositionStatus) {
        case COMPLETED:
            operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()));
            // NOTE(review): the pending counter is decremented at assembly time, before the
            // operation actually runs below — confirm this bookkeeping is intended.
            messagesPending.decrementAndGet();
            break;
        case ABANDONED:
            operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get()));
            break;
        case SUSPENDED:
            DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get())
                .setDeadLetterReason(deadLetterReason);
            operation = receiver.deadLetter(receivedMessage, deadLetterOptions);
            messagesPending.decrementAndGet();
            break;
        case DEFERRED:
            operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get()));
            break;
        default:
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "Disposition status not recognized for this test case: " + dispositionStatus));
    }
    // Execute the settlement, then commit the transaction it participated in.
    StepVerifier.create(operation)
        .verifyComplete();
    StepVerifier.create(receiver.commitTransaction(transaction.get()))
        .verifyComplete();
}
/**
 * Verifies that we can do the following on different clients (sender and receiver):
 * 1. create a transaction using the sender; 2. receive and complete with the transactionContext;
 * 3. commit the transaction using the sender.
 */
// NOTE(review): the @MethodSource value below appears truncated by extraction — confirm the
// full provider name against the original source.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
@Disabled
void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
    final boolean shareConnection = true;
    final boolean useCredentials = false;
    final int entityIndex = 0;
    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();
    this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
        .buildAsyncClient();
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    // The transaction is created on the sender but used to settle on the receiver.
    AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
    StepVerifier.create(sender.createTransaction())
        .assertNext(txn -> {
            transaction.set(txn);
            assertNotNull(transaction);
        })
        .verifyComplete();
    assertNotNull(transaction.get());
    final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
    assertNotNull(receivedMessage);
    StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())))
        .verifyComplete();
    StepVerifier.create(sender.commitTransaction(transaction.get()))
        .verifyComplete();
}
/**
 * Verifies that we can send and receive two messages.
 */
// NOTE(review): the @MethodSource value below appears truncated by extraction — confirm the
// full provider name against the original source.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = 0;
    final boolean shareConnection = false;
    final boolean useCredentials = false;
    final Duration shortWait = Duration.ofSeconds(3);
    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    // Send the same payload twice and wait for both sends before receiving.
    Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
    if (isSessionEnabled) {
        assertNotNull(sessionId, "'sessionId' should have been set.");
        this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
        this.receiver = sessionReceiver.acceptSession(sessionId).block();
    } else {
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
    }
    // Expect exactly the two copies, then cancel after a short grace period (auto-complete mode).
    StepVerifier.create(receiver.receiveMessages())
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
        })
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
        })
        .thenAwait(shortWait)
        .thenCancel()
        .verify();
}
/**
 * Verifies that we can send and receive a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange
    final int entityIndex = 0;
    final boolean shareConnection = false;
    final boolean useCredentials = false;
    // Extra wait before cancelling so in-flight settlement can finish.
    final Duration shortWait = Duration.ofSeconds(3);
    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);

    if (isSessionEnabled) {
        assertNotNull(sessionId, "'sessionId' should have been set.");
        this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
        this.receiver = this.sessionReceiver.acceptSession(sessionId).block();
    } else {
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
    }

    // Act & Assert: the message arrives and matches the sent payload.
    StepVerifier.create(receiver.receiveMessages())
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
        })
        .thenAwait(shortWait)
        .thenCancel()
        .verify();

    // A second subscription should observe no further messages, confirming the
    // first delivery was settled (auto-completed) rather than redelivered.
    StepVerifier.create(receiver.receiveMessages())
        .thenAwait(shortWait)
        .thenCancel()
        .verify();
}
/**
 * Verifies that we can send and peek a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange: publish a single message with a unique id.
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage toSend = getMessage(messageId, isSessionEnabled);
    sendMessage(toSend).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Act & Assert: a peek is non-destructive, so the message is visible first
    // via peekMessage() and then again via an actual receive.
    StepVerifier.create(receiver.peekMessage())
        .assertNext(peeked -> assertMessageEquals(peeked, messageId, isSessionEnabled))
        .verifyComplete();

    final Mono<ServiceBusReceivedMessage> receiveAndSettle = receiver.receiveMessages()
        .concatMap(received -> receiver.complete(received).thenReturn(received))
        .next();
    StepVerifier.create(receiveAndSettle)
        .assertNext(received -> assertMessageEquals(received, messageId, isSessionEnabled))
        .verifyComplete();
}
/**
 * Verifies that an empty entity does not error when peeking.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange: point the receiver at an entity known to contain no messages.
    setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);
    final int startingSequenceNumber = 1;

    // Act & Assert: peeking an empty entity completes without emitting anything.
    StepVerifier.create(receiver.peekMessageAt(startingSequenceNumber)).verifyComplete();
}
/**
 * Verifies that we can schedule and receive a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange: schedule the message to be enqueued 2 seconds from now.
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    // Wait slightly longer than the enqueue delay before receiving.
    final Duration shortDelay = Duration.ofSeconds(4);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2);
    sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Act & Assert: after the delay the scheduled message is receivable,
    // matches what was sent, and is completed.
    StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages()
        .concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next()))
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();
}
/**
 * Verifies that we can cancel a scheduled message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange: schedule a message far enough in the future that it can be
    // cancelled before it becomes visible.
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage toSchedule = getMessage(messageId, isSessionEnabled);
    final OffsetDateTime enqueueAt = OffsetDateTime.now().plusSeconds(10);
    final Duration cancelDelay = Duration.ofSeconds(3);

    final Long scheduledSequenceNumber = sender.scheduleMessage(toSchedule, enqueueAt).block(TIMEOUT);
    logger.verbose("Scheduled the message, sequence number {}.", scheduledSequenceNumber);
    assertNotNull(scheduledSequenceNumber);

    // Act: cancel the scheduled message before its enqueue time elapses.
    Mono.delay(cancelDelay)
        .then(sender.cancelScheduledMessage(scheduledSequenceNumber))
        .block(TIMEOUT);
    messagesPending.decrementAndGet();
    logger.verbose("Cancelled the scheduled message, sequence number {}.", scheduledSequenceNumber);

    // Assert: nothing is ever delivered for the cancelled message.
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    StepVerifier.create(receiver.receiveMessages().take(1))
        .thenAwait(Duration.ofSeconds(5))
        .thenCancel()
        .verify();
}
/**
 * Verifies that we can send and peek a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange
    final int entityIndex = 3;
    setSender(entityType, entityIndex, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, entityIndex, isSessionEnabled);

    // First peek to discover the message's sequence number.
    final ServiceBusReceivedMessage peekMessage = receiver.peekMessage().block(TIMEOUT);
    assertNotNull(peekMessage);
    final long sequenceNumber = peekMessage.getSequenceNumber();

    try {
        // Act & Assert: peeking at that sequence number returns the same message.
        StepVerifier.create(receiver.peekMessageAt(sequenceNumber))
            .assertNext(m -> {
                assertEquals(sequenceNumber, m.getSequenceNumber());
                assertMessageEquals(m, messageId, isSessionEnabled);
            })
            .verifyComplete();
    } finally {
        // Cleanup: drain and complete the message so it does not leak into other tests.
        StepVerifier.create(receiver.receiveMessages()
            .concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage))
            .next())
            .expectNextCount(1)
            .verifyComplete();
        messagesPending.decrementAndGet();
    }
}
/**
 * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange
    setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);

    // Asserts a peeked message carries the expected zero-based position stamped
    // into its application properties at send time.
    final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> {
        final Map<String, Object> properties = message.getApplicationProperties();
        final Object value = properties.get(MESSAGE_POSITION_ID);
        assertTrue(value instanceof Integer, "Did not contain correct position number: " + value);
        final int position = (int) value;
        assertEquals(index, position);
    };
    final String messageId = UUID.randomUUID().toString();
    final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES);
    if (isSessionEnabled) {
        messages.forEach(m -> m.setSessionId(sessionId));
    }
    sender.sendMessages(messages)
        .doOnSuccess(aVoid -> {
            int number = messagesPending.addAndGet(messages.size());
            logger.info("Number of messages sent: {}", number);
        })
        .block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);

    try {
        // Act & Assert: successive peeks continue from where the previous peek
        // left off — positions 0-2, then 3-6, then 7.
        StepVerifier.create(receiver.peekMessages(3))
            .assertNext(message -> checkCorrectMessage.accept(message, 0))
            .assertNext(message -> checkCorrectMessage.accept(message, 1))
            .assertNext(message -> checkCorrectMessage.accept(message, 2))
            .verifyComplete();
        StepVerifier.create(receiver.peekMessages(4))
            .assertNext(message -> checkCorrectMessage.accept(message, 3))
            .assertNext(message -> checkCorrectMessage.accept(message, 4))
            .assertNext(message -> checkCorrectMessage.accept(message, 5))
            .assertNext(message -> checkCorrectMessage.accept(message, 6))
            .verifyComplete();
        StepVerifier.create(receiver.peekMessage())
            .assertNext(message -> checkCorrectMessage.accept(message, 7))
            .verifyComplete();
    } finally {
        // Cleanup: receive and complete all sent messages so the entity is drained.
        AtomicInteger completed = new AtomicInteger();
        StepVerifier.create(receiver.receiveMessages().take(messages.size()))
            .thenConsumeWhile(receivedMessage -> {
                completed.incrementAndGet();
                receiver.complete(receivedMessage).block(OPERATION_TIMEOUT);
                return completed.get() <= messages.size();
            })
            .thenCancel()
            .verify();
        messagesPending.addAndGet(-messages.size());
    }
}
/**
 * Verifies that we can send and peek a batch of messages.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessagesFromSequence(MessagingEntityType entityType) {
    // Arrange
    setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, false);
    final int maxMessages = 2;
    final int fromSequenceNumber = 1;
    Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);

    // Act & Assert: peeking from a sequence number yields the requested count.
    StepVerifier.create(receiver.peekMessagesAt(maxMessages, fromSequenceNumber))
        .expectNextCount(maxMessages)
        .verifyComplete();

    // Cleanup: receive and complete both messages so the entity is drained.
    StepVerifier.create(receiver.receiveMessages().take(maxMessages))
        .assertNext(receivedMessage -> {
            receiver.complete(receivedMessage).block(Duration.ofSeconds(15));
        })
        .assertNext(receivedMessage -> {
            receiver.complete(receivedMessage).block(Duration.ofSeconds(15));
        })
        .expectComplete()
        .verify(TIMEOUT);
}
/**
 * Verifies that an empty entity does not error when peeking.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange: target an entity that is known to contain no messages.
    setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);
    final int requestedCount = 10;
    final int startingSequenceNumber = 1;

    // Act & Assert: a batched peek on an empty entity completes with no items.
    StepVerifier.create(receiver.peekMessagesAt(requestedCount, startingSequenceNumber)).verifyComplete();
}
/**
 * Verifies that we can dead-letter a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange
    final int entityIndex = 0;
    setSender(entityType, entityIndex, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, entityIndex, isSessionEnabled);

    // Act & Assert: dead-lettering the received message succeeds and the
    // delivered payload matches what was sent.
    StepVerifier.create(receiver.receiveMessages()
        .concatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage))
        .next())
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        })
        .thenCancel()
        .verify();
}
/**
 * Verifies that a received message can be settled via {@code complete()} and matches the sent payload.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Act & Assert: receive the single message, complete it, and verify contents.
    StepVerifier.create(receiver.receiveMessages()
        .concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage))
        .next())
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();
    // BUG FIX: removed a second messagesPending.decrementAndGet() that followed the
    // verification. Only one message was sent, and it is already accounted for in
    // assertNext above; decrementing twice corrupts the pending-message counter.
}
/**
 * Verifies that we can renew message lock on a non-session receiver.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndRenewLock(MessagingEntityType entityType) {
    // Arrange
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, false);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, false);
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false);

    final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
    assertNotNull(receivedMessage);
    assertNotNull(receivedMessage.getLockedUntil());

    // Capture the initial lock expiry so the renewed one can be compared against it.
    final OffsetDateTime initialLock = receivedMessage.getLockedUntil();
    logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock);

    try {
        // Act & Assert: after waiting 7 seconds, renewing the lock must push the
        // lockedUntil timestamp past the original expiry.
        StepVerifier.create(Mono.delay(Duration.ofSeconds(7))
            .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage))))
            .assertNext(lockedUntil -> {
                assertTrue(lockedUntil.isAfter(initialLock),
                    String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]",
                        lockedUntil, initialLock));
            })
            .verifyComplete();
    } finally {
        // Cleanup: complete the message so it does not leak into other tests.
        logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber());
        receiver.complete(receivedMessage)
            .doOnSuccess(aVoid -> messagesPending.decrementAndGet())
            .block(TIMEOUT);
    }
}
/**
* Verifies that the lock can be automatically renewed.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
final AtomicInteger lockRenewCount = new AtomicInteger();
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages().concatMap(received -> {
logger.info("{}: lockToken[{}]. lockedUntil[{}]. now[{}]", received.getSequenceNumber(),
received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now());
while (lockRenewCount.get() < 4) {
lockRenewCount.incrementAndGet();
logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now());
try {
TimeUnit.SECONDS.sleep(5);
} catch (InterruptedException error) {
logger.error("Error occurred while sleeping: " + error);
}
}
return receiver.complete(received).thenReturn(received);
}))
.assertNext(received -> {
assertTrue(lockRenewCount.get() > 0);
messagesPending.decrementAndGet();
})
.thenCancel()
.verify();
}
/**
 * Verifies that a received message can be abandoned and still matches the sent payload.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange: publish a single message with a unique id.
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage toSend = getMessage(messageId, isSessionEnabled);
    sendMessage(toSend).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Act & Assert: receive the message, abandon it, and verify the payload.
    final Mono<ServiceBusReceivedMessage> receiveAndSettle = receiver.receiveMessages()
        .concatMap(received -> receiver.abandon(received).thenReturn(received))
        .next();
    StepVerifier.create(receiveAndSettle)
        .assertNext(received -> {
            assertMessageEquals(received, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();
}
/**
 * Verifies that a message can be deferred and later retrieved via its sequence number, then completed.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange
    setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled);

    // Act & Assert: receive and defer the message, keeping a reference so it can
    // be fetched again by sequence number below.
    AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<>();
    StepVerifier.create(receiver.receiveMessages()
        .concatMap(receivedMessage -> receiver.defer(receivedMessage).thenReturn(receivedMessage))
        .next())
        .assertNext(m -> {
            received.set(m);
            assertMessageEquals(m, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();

    // The deferred message is only reachable via receiveDeferredMessage; complete
    // it so the entity is drained.
    receiver.receiveDeferredMessage(received.get().getSequenceNumber())
        .flatMap(m -> receiver.complete(m))
        .block(TIMEOUT);
    messagesPending.decrementAndGet();
}
/**
 * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) {
    // Arrange
    setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, false);
    sendMessage(message).block(TIMEOUT);

    final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
    assertNotNull(receivedMessage);
    receiver.defer(receivedMessage).block(TIMEOUT);

    // Fetch the deferred message back by its sequence number and confirm it is
    // the same message that was deferred.
    final ServiceBusReceivedMessage receivedDeferredMessage = receiver
        .receiveDeferredMessage(receivedMessage.getSequenceNumber())
        .block(TIMEOUT);
    assertNotNull(receivedDeferredMessage);
    assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber());

    // Act: apply the parameterized disposition to the deferred message.
    final Mono<Void> operation;
    switch (dispositionStatus) {
        case ABANDONED:
            operation = receiver.abandon(receivedDeferredMessage);
            break;
        case SUSPENDED:
            operation = receiver.deadLetter(receivedDeferredMessage);
            break;
        case COMPLETED:
            operation = receiver.complete(receivedDeferredMessage);
            break;
        default:
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "Disposition status not recognized for this test case: " + dispositionStatus));
    }

    // Assert: the disposition succeeds.
    StepVerifier.create(operation)
        .expectComplete()
        .verify();

    if (dispositionStatus != DispositionStatus.COMPLETED) {
        messagesPending.decrementAndGet();
    }
}
/**
 * Verifies that application properties of every supported AMQP type round-trip through send and receive
 * unchanged (null, boolean, byte, short, int, long, float, double, char, UUID, String).
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) {
    // Arrange
    final boolean isSessionEnabled = true;
    setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);

    Map<String, Object> sentProperties = messageToSend.getApplicationProperties();
    sentProperties.put("NullProperty", null);
    sentProperties.put("BooleanProperty", true);
    sentProperties.put("ByteProperty", (byte) 1);
    sentProperties.put("ShortProperty", (short) 2);
    sentProperties.put("IntProperty", 3);
    sentProperties.put("LongProperty", 4L);
    sentProperties.put("FloatProperty", 5.5f);
    // BUG FIX: was 6.6f (a float literal); the property named "DoubleProperty"
    // should actually exercise the double type.
    sentProperties.put("DoubleProperty", 6.6);
    sentProperties.put("CharProperty", 'z');
    sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d"));
    sentProperties.put("StringProperty", "string");

    sendMessage(messageToSend).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);

    // Act & Assert: every sent property key/value pair must come back identical.
    StepVerifier.create(receiver.receiveMessages().concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())
        .assertNext(receivedMessage -> {
            messagesPending.decrementAndGet();
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);

            final Map<String, Object> received = receivedMessage.getApplicationProperties();
            assertEquals(sentProperties.size(), received.size());
            for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
                if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
                    assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
                } else {
                    final Object expected = sentEntry.getValue();
                    final Object actual = received.get(sentEntry.getKey());
                    assertEquals(expected, actual, String.format(
                        "Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected,
                        actual));
                }
            }
        })
        .thenCancel()
        .verify();
}
/**
 * Verifies that session state written via setSessionState is returned by getSessionState.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void setAndGetSessionState(MessagingEntityType entityType) {
    // Arrange
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, true);
    final byte[] sessionState = "Finished".getBytes(UTF_8);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage messageToSend = getMessage(messageId, true);
    sendMessage(messageToSend).block(Duration.ofSeconds(10));
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true);

    // Act: receive one message, complete it, then write the session state.
    StepVerifier.create(receiver.receiveMessages()
        .take(1)
        .flatMap(message -> {
            logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
                message.getSessionId(), message.getLockToken(), message.getLockedUntil());
            receiver.complete(message).block(Duration.ofSeconds(15));
            messagesPending.decrementAndGet();
            return receiver.setSessionState(sessionState);
        }))
        .expectComplete()
        .verify();

    // Assert: the stored state reads back byte-for-byte.
    StepVerifier.create(receiver.getSessionState())
        .assertNext(state -> {
            logger.info("State received: {}", new String(state, UTF_8));
            assertArrayEquals(sessionState, state);
        })
        .verifyComplete();
}
/**
 * Verifies that we can receive a message from dead letter queue.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveFromDeadLetter(MessagingEntityType entityType) {
    // Arrange
    final Duration shortWait = Duration.ofSeconds(2);
    final boolean isSessionEnabled = false;
    final int entityIndex = 0;
    setSender(entityType, entityIndex, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final List<ServiceBusReceivedMessage> receivedMessages = new ArrayList<>();
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, entityIndex, isSessionEnabled);

    // Dead-letter the message so it moves to the entity's dead-letter sub-queue.
    StepVerifier.create(receiver.receiveMessages().concatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).next())
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();

    // Build a receiver for the DEAD_LETTER_QUEUE sub-queue of the same entity.
    final ServiceBusReceiverAsyncClient deadLetterReceiver;
    switch (entityType) {
        case QUEUE:
            final String queueName = getQueueName(entityIndex);
            assertNotNull(queueName, "'queueName' cannot be null.");
            deadLetterReceiver = getBuilder(false).receiver()
                .queueName(queueName)
                .subQueue(SubQueue.DEAD_LETTER_QUEUE)
                .buildAsyncClient();
            break;
        case SUBSCRIPTION:
            final String topicName = getTopicName(entityIndex);
            final String subscriptionName = getSubscriptionBaseName();
            assertNotNull(topicName, "'topicName' cannot be null.");
            assertNotNull(subscriptionName, "'subscriptionName' cannot be null.");
            deadLetterReceiver = getBuilder(false).receiver()
                .topicName(topicName)
                .subscriptionName(subscriptionName)
                .subQueue(SubQueue.DEAD_LETTER_QUEUE)
                .buildAsyncClient();
            break;
        default:
            throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType));
    }

    // Assert: the dead-lettered message is receivable from the dead-letter queue.
    StepVerifier.create(deadLetterReceiver.receiveMessages())
        .assertNext(serviceBusReceivedMessage -> {
            receivedMessages.add(serviceBusReceivedMessage);
            assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled);
        })
        .thenAwait(shortWait)
        .thenCancel()
        .verify();
}
/**
 * Verifies that a message lock can be kept renewed for a bounded duration and the message completed afterwards.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void renewMessageLock(MessagingEntityType entityType) {
    // Arrange
    final boolean isSessionEnabled = false;
    setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    // Renew the lock for up to 35 seconds, then wait slightly longer before completing.
    final Duration maximumDuration = Duration.ofSeconds(35);
    final Duration sleepDuration = maximumDuration.plusMillis(500);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final ServiceBusReceivedMessage receivedMessage = sendMessage(message)
        .then(receiver.receiveMessages().next())
        .block(TIMEOUT);
    assertNotNull(receivedMessage);
    final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil();
    assertNotNull(lockedUntil);

    // Act & Assert: the renewal operation keeps the lock alive past the wait,
    // so completing the message afterwards must still succeed.
    StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration))
        .thenAwait(sleepDuration)
        .then(() -> {
            logger.info("Completing message.");
            int numberCompleted = completeMessages(receiver, Collections.singletonList(receivedMessage));
            messagesPending.addAndGet(-numberCompleted);
        })
        .expectComplete()
        .verify(Duration.ofMinutes(3));
}
/**
* Verifies that we can receive a message which have different section set (i.e header, footer, annotations,
* application properties etc).
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndValidateProperties(MessagingEntityType entityType) {
final boolean isSessionEnabled = false;
final int totalMessages = 1;
final String subject = "subject";
final Map<String, Object> footer = new HashMap<>();
footer.put("footer-key-1", "footer-value-1");
footer.put("footer-key-2", "footer-value-2");
final Map<String, Object> applicationProperties = new HashMap<>();
applicationProperties.put("ap-key-1", "ap-value-1");
applicationProperties.put("ap-key-2", "ap-value-2");
final Map<String, Object> deliveryAnnotation = new HashMap<>();
deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1");
deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2");
final String messageId = UUID.randomUUID().toString();
final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage(
AmqpMessageBody.fromData(CONTENTS_BYTES));
expectedAmqpProperties.getProperties().setSubject(subject);
expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid");
expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to"));
expectedAmqpProperties.getProperties().setContentType("content-type");
expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id"));
expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to"));
expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60));
expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes());
expectedAmqpProperties.getProperties().setContentEncoding("string");
expectedAmqpProperties.getProperties().setGroupSequence(2L);
expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30));
expectedAmqpProperties.getHeader().setPriority((short) 2);
expectedAmqpProperties.getHeader().setFirstAcquirer(true);
expectedAmqpProperties.getHeader().setDurable(true);
expectedAmqpProperties.getFooter().putAll(footer);
expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation);
expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties);
final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId);
final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage();
amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations());
amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties());
amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations());
amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter());
final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader();
header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer());
header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive());
header.setDurable(expectedAmqpProperties.getHeader().isDurable());
header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount());
header.setPriority(expectedAmqpProperties.getHeader().getPriority());
final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties();
amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo()));
amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding()));
amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()));
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject()));
amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType());
amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId());
amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo());
amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence());
amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId());
amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime());
amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime());
amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId());
setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/)
.assertNext(received -> {
assertNotNull(received.getLockToken());
AmqpAnnotatedMessage actual = received.getRawAmqpMessage();
try {
assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes());
assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority());
assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer());
assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable());
assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId());
assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo());
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType());
assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId());
assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo());
assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond());
assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding());
assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence());
assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond());
assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId());
assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations());
assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations());
assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties());
assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter());
} finally {
logger.info("Completing message.");
receiver.complete(received).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
}
})
.thenCancel()
.verify(Duration.ofMinutes(2));
}
/**
* Verifies we can autocomplete for a queue.
*
* @param entityType Entity Type.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityProvider")
@ParameterizedTest
// Sends three messages, consumes them through a receiver built WITHOUT disableAutoComplete(),
// then peeks again to verify the messages were settled (the entity head moved past them).
void autoComplete(MessagingEntityType entityType) {
final Duration shortWait = Duration.ofSeconds(2);
final int index = TestUtils.USE_CASE_AUTO_COMPLETE;
setSender(entityType, index, false);
final int numberOfEvents = 3;
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId);
setReceiver(entityType, index, false);
// Snapshot the current head of the entity before sending; peek does not remove messages,
// so this tells us whether a leftover message from an earlier run is delivered first.
final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT);
Mono.when(messages.stream().map(this::sendMessage)
.collect(Collectors.toList()))
.block(TIMEOUT);
// Auto-completing receiver: every delivered message is completed once the consumer returns.
final ServiceBusReceiverAsyncClient autoCompleteReceiver =
getReceiverBuilder(false, entityType, index, false)
.buildAsyncClient();
try {
StepVerifier.create(autoCompleteReceiver.receiveMessages())
.assertNext(receivedMessage -> {
// A pre-existing message (if any) arrives first; otherwise the first delivery is ours.
if (lastMessage != null) {
assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId());
} else {
assertEquals(messageId, receivedMessage.getMessageId());
}
})
.assertNext(context -> {
if (lastMessage == null) {
assertEquals(messageId, context.getMessageId());
}
})
.assertNext(context -> {
if (lastMessage == null) {
assertEquals(messageId, context.getMessageId());
}
})
.thenAwait(shortWait)
.thenCancel()
.verify(TIMEOUT);
} finally {
autoCompleteReceiver.close();
}
// After auto-completion the entity head must be unchanged relative to the pre-test snapshot:
// either still empty, or still the same leftover message.
final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT);
if (lastMessage == null) {
assertNull(newLastMessage,
String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a"));
} else {
assertNotNull(newLastMessage);
assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber());
}
}
/**
* Asserts the length and values with in the map.
*/
/**
 * Asserts that {@code actualMap} contains at least every entry of {@code expectedMap} with an
 * equal value. Extra keys in the actual map are tolerated (brokers may add annotations).
 */
private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) {
    assertTrue(actualMap.size() >= expectedMap.size());
    for (Map.Entry<String, Object> expected : expectedMap.entrySet()) {
        final Object actualValue = actualMap.get(expected.getKey());
        assertEquals(expected.getValue(), actualValue, "Value is not equal for Key " + expected.getKey());
    }
}
/**
 * Sets both the sender and the receiver for the given entity. If session is enabled, the
 * receiver is bound to the single session id generated per test.
 */
private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
setSender(entityType, entityIndex, isSessionEnabled);
setReceiver(entityType, entityIndex, isSessionEnabled);
}
// Creates 'receiver' with auto-complete disabled. For session entities, a session receiver is
// built first and the per-test session id is accepted (blocking until the link is open).
private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
final boolean shareConnection = false;
final boolean useCredentials = false;
if (isSessionEnabled) {
assertNotNull(sessionId, "'sessionId' should have been set.");
sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.disableAutoComplete()
.buildAsyncClient();
// Session-bound receiver; null only if accepting the session failed silently.
this.receiver = sessionReceiver.acceptSession(sessionId).block();
} else {
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.disableAutoComplete()
.buildAsyncClient();
}
}
// Creates 'sender' for the given entity; isSessionEnabled selects a session-capable entity.
private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
final boolean shareConnection = false;
final boolean useCredentials = false;
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
}
/**
 * Sends a single message and, on success, bumps the pending-message counter used by
 * {@link #afterTest()} to know how much to drain.
 */
private Mono<Void> sendMessage(ServiceBusMessage message) {
    return sender.sendMessage(message)
        .doOnSuccess(unused -> {
            final int pendingCount = messagesPending.incrementAndGet();
            logger.info("Message Id {}. Number sent: {}", message.getMessageId(), pendingCount);
        });
}
/**
 * Completes every message in {@code messages} concurrently, blocking until all settle.
 *
 * @return the number of messages completed.
 */
private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) {
    final List<Mono<Void>> completions = messages.stream()
        .map(client::complete)
        .collect(Collectors.toList());
    Mono.when(completions).block(TIMEOUT);
    return messages.size();
}
}
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
// Count of sent-but-not-yet-settled messages; afterTest() drains this many from the entity.
private final AtomicInteger messagesPending = new AtomicInteger();
// Default session mode for tests that are not parameterized on it.
private final boolean isSessionEnabled = false;
// Clients are (re)created per test by the set* helpers and disposed in afterTest().
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private ServiceBusSessionReceiverAsyncClient sessionReceiver;
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
// Fresh session id per test so session-enabled cases never collide across runs.
sessionId = UUID.randomUUID().toString();
}
@Override
protected void afterTest() {
sharedBuilder = null;
try {
// Disposes all clients created by this test; failures here must not mask the test result.
dispose(receiver, sender, sessionReceiver);
} catch (Exception e) {
logger.warning("Error occurred when draining queue.", e);
}
}
/**
 * Verifies that we can create multiple transactions back-to-back on the same receiver.
 */
@Test
void createMultipleTransactionTest() {
setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);
// Two consecutive createTransaction calls must each yield a non-null transaction context.
StepVerifier.create(receiver.createTransaction())
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(receiver.createTransaction())
.assertNext(Assertions::assertNotNull)
.verifyComplete();
}
/**
* Verifies that we can create transaction and complete.
*/
/**
 * Verifies that, on a non-session entity over a shared connection, we can: 1. create a
 * transaction, 2. settle a received message with that transaction context (for each
 * disposition), and 3. commit the transaction.
 */
@ParameterizedTest
@EnumSource(DispositionStatus.class)
void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
final MessagingEntityType entityType = MessagingEntityType.QUEUE;
setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled);
final String messageId1 = UUID.randomUUID().toString();
final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
final String deadLetterReason = "test reason";
sendMessage(message1).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
// Build the settlement operation for the disposition under test, tagged with the transaction.
// COMPLETED and SUSPENDED remove the message from the entity, hence the counter decrements.
final Mono<Void> operation;
switch (dispositionStatus) {
case COMPLETED:
operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()));
messagesPending.decrementAndGet();
break;
case ABANDONED:
operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get()));
break;
case SUSPENDED:
DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get())
.setDeadLetterReason(deadLetterReason);
operation = receiver.deadLetter(receivedMessage, deadLetterOptions);
messagesPending.decrementAndGet();
break;
case DEFERRED:
operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get()));
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.verifyComplete();
StepVerifier.create(receiver.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
* Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using
* sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityProvider")
@ParameterizedTest
@Disabled
// Creates the transaction on the SENDER, settles a received message with it on the RECEIVER,
// and commits on the sender again — exercising a cross-client transaction over a shared
// connection. (Currently @Disabled.)
void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
final boolean shareConnection = true;
final boolean useCredentials = false;
final int entityIndex = 0;
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(sender.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())))
.verifyComplete();
StepVerifier.create(sender.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
* Verifies that we can send and receive two messages.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Sends the same message twice and verifies both copies are delivered. The receiver is built
// WITHOUT disableAutoComplete(), so settlement happens automatically.
void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
final int entityIndex = 0;
final boolean shareConnection = false;
final boolean useCredentials = false;
final Duration shortWait = Duration.ofSeconds(3);
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
if (isSessionEnabled) {
assertNotNull(sessionId, "'sessionId' should have been set.");
this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
this.receiver = sessionReceiver.acceptSession(sessionId).block();
} else {
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
}
StepVerifier.create(receiver.receiveMessages())
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.thenAwait(shortWait)
.thenCancel()
.verify();
}
/**
* Verifies that we can send and receive a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Sends one message, receives it with an auto-completing receiver, then subscribes again and
// expects NO further deliveries — proving auto-complete settled the message.
void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
final int entityIndex = 0;
final boolean shareConnection = false;
final boolean useCredentials = false;
final Duration shortWait = Duration.ofSeconds(3);
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
if (isSessionEnabled) {
assertNotNull(sessionId, "'sessionId' should have been set.");
this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
this.receiver = this.sessionReceiver.acceptSession(sessionId).block();
} else {
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
}
StepVerifier.create(receiver.receiveMessages())
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.thenAwait(shortWait)
.thenCancel()
.verify();
// Second subscription: the entity should now be empty.
StepVerifier.create(receiver.receiveMessages())
.thenAwait(shortWait)
.thenCancel()
.verify();
}
/**
* Verifies that we can send and peek a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Peeks the sent message (non-destructive), then receives and completes it for real.
void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
StepVerifier.create(receiver.peekMessage())
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.verifyComplete();
// Drain the message so it does not leak into later tests on the same entity.
StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.verifyComplete();
}
/**
* Verifies that an empty entity does not error when peeking.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Peeking at a sequence number on an empty entity must complete without emitting or erroring.
void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);
final int fromSequenceNumber = 1;
StepVerifier.create(receiver.peekMessageAt(fromSequenceNumber))
.verifyComplete();
}
/**
* Verifies that we can schedule and receive a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Schedules a message 2s in the future, waits past the enqueue time, and verifies delivery.
void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final Duration shortDelay = Duration.ofSeconds(4);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2);
sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
// shortDelay (4s) > scheduled offset (2s), so the message is available when we subscribe.
StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next()))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
}
/**
* Verifies that we can cancel a scheduled message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Schedules a message 10s out, cancels it after 3s, then verifies nothing is delivered.
void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10);
final Duration delayDuration = Duration.ofSeconds(3);
final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber);
assertNotNull(sequenceNumber);
// Cancel well before the 10s enqueue time elapses.
Mono.delay(delayDuration)
.then(sender.cancelScheduledMessage(sequenceNumber))
.block(TIMEOUT);
messagesPending.decrementAndGet();
logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
// No delivery expected within the wait window.
StepVerifier.create(receiver.receiveMessages().take(1))
.thenAwait(Duration.ofSeconds(5))
.thenCancel()
.verify();
}
/**
 * Verifies that we can peek a message at a specific sequence number.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Peeks the head message to learn its sequence number, then peeks AT that sequence number and
// expects the same message back.
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
final int entityIndex = 3;
setSender(entityType, entityIndex, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, entityIndex, isSessionEnabled);
final ServiceBusReceivedMessage peekMessage = receiver.peekMessage().block(TIMEOUT);
assertNotNull(peekMessage);
final long sequenceNumber = peekMessage.getSequenceNumber();
try {
StepVerifier.create(receiver.peekMessageAt(sequenceNumber))
.assertNext(m -> {
assertEquals(sequenceNumber, m.getSequenceNumber());
assertMessageEquals(m, messageId, isSessionEnabled);
})
.verifyComplete();
} finally {
// Drain the message regardless of assertion outcome so the entity is clean.
StepVerifier.create(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
.expectNextCount(1)
.verifyComplete();
messagesPending.decrementAndGet();
}
}
/**
* Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Sends ten position-tagged messages and verifies that successive peekMessages(n) calls advance
// the peek cursor: 3, then 4, then 1 message, covering positions 0..7 in order.
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);
// Asserts a peeked message carries the expected MESSAGE_POSITION_ID application property.
final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> {
final Map<String, Object> properties = message.getApplicationProperties();
final Object value = properties.get(MESSAGE_POSITION_ID);
assertTrue(value instanceof Integer, "Did not contain correct position number: " + value);
final int position = (int) value;
assertEquals(index, position);
};
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES);
if (isSessionEnabled) {
messages.forEach(m -> m.setSessionId(sessionId));
}
sender.sendMessages(messages)
.doOnSuccess(aVoid -> {
int number = messagesPending.addAndGet(messages.size());
logger.info("Number of messages sent: {}", number);
})
.block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);
try {
StepVerifier.create(receiver.peekMessages(3))
.assertNext(message -> checkCorrectMessage.accept(message, 0))
.assertNext(message -> checkCorrectMessage.accept(message, 1))
.assertNext(message -> checkCorrectMessage.accept(message, 2))
.verifyComplete();
StepVerifier.create(receiver.peekMessages(4))
.assertNext(message -> checkCorrectMessage.accept(message, 3))
.assertNext(message -> checkCorrectMessage.accept(message, 4))
.assertNext(message -> checkCorrectMessage.accept(message, 5))
.assertNext(message -> checkCorrectMessage.accept(message, 6))
.verifyComplete();
StepVerifier.create(receiver.peekMessage())
.assertNext(message -> checkCorrectMessage.accept(message, 7))
.verifyComplete();
} finally {
// Drain all ten messages so later tests on this entity start from empty.
AtomicInteger completed = new AtomicInteger();
StepVerifier.create(receiver.receiveMessages().take(messages.size()))
.thenConsumeWhile(receivedMessage -> {
completed.incrementAndGet();
receiver.complete(receivedMessage).block(OPERATION_TIMEOUT);
return completed.get() <= messages.size();
})
.thenCancel()
.verify();
messagesPending.addAndGet(-messages.size());
}
}
/**
* Verifies that we can send and peek a batch of messages.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityProvider")
@ParameterizedTest
// Sends two messages and peeks a batch starting from sequence number 1, then drains both.
void peekMessagesFromSequence(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
final int maxMessages = 2;
final int fromSequenceNumber = 1;
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
StepVerifier.create(receiver.peekMessagesAt(maxMessages, fromSequenceNumber))
.expectNextCount(maxMessages)
.verifyComplete();
// Drain both messages, completing each so the entity is left empty.
StepVerifier.create(receiver.receiveMessages().take(maxMessages))
.assertNext(receivedMessage -> {
receiver.complete(receivedMessage).block(Duration.ofSeconds(15));
})
.assertNext(receivedMessage -> {
receiver.complete(receivedMessage).block(Duration.ofSeconds(15));
})
.expectComplete()
.verify(TIMEOUT);
}
/**
* Verifies that an empty entity does not error when peeking.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Batch-peeking from a sequence number on an empty entity completes with no emissions.
void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);
final int maxMessages = 10;
final int fromSequenceNumber = 1;
StepVerifier.create(receiver.peekMessagesAt(maxMessages, fromSequenceNumber))
.verifyComplete();
}
/**
* Verifies that we can dead-letter a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Receives one message and moves it to the dead-letter sub-queue.
void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
final int entityIndex = 0;
setSender(entityType, entityIndex, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, entityIndex, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
// Dead-lettering removes the message from the main entity.
messagesPending.decrementAndGet();
})
.thenCancel()
.verify();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Sends one message, receives it, completes it, and verifies its contents.
void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Act & Assert
    StepVerifier.create(receiver.receiveMessages()
        .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            // Exactly one message was sent and completed, so decrement the pending counter once.
            // (A second decrement after verifyComplete() was removed: it double-counted the
            // single completion and skewed the drain bookkeeping used by afterTest().)
            messagesPending.decrementAndGet();
        }).verifyComplete();
}
/**
* Verifies that we can renew message lock on a non-session receiver.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityProvider")
@ParameterizedTest
// Receives a message, waits 7s, renews its lock, and asserts the new lockedUntil is later
// than the initial one. The message is completed in the finally block either way.
void receiveAndRenewLock(MessagingEntityType entityType) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false);
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
assertNotNull(receivedMessage.getLockedUntil());
final OffsetDateTime initialLock = receivedMessage.getLockedUntil();
logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock);
try {
// Delay before renewing so the renewed lock time is measurably later.
StepVerifier.create(Mono.delay(Duration.ofSeconds(7))
.then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage))))
.assertNext(lockedUntil -> {
assertTrue(lockedUntil.isAfter(initialLock),
String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]",
lockedUntil, initialLock));
})
.verifyComplete();
} finally {
logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber());
receiver.complete(receivedMessage)
.doOnSuccess(aVoid -> messagesPending.decrementAndGet())
.block(TIMEOUT);
}
}
/**
* Verifies that the lock can be automatically renewed.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Holds a received message well past its initial lock duration (4 x 5s of blocking waits) so
// that automatic lock renewal must kick in for the final complete() to succeed.
void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    final AtomicInteger lockRenewCount = new AtomicInteger();

    // Arrange
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Act & Assert
    StepVerifier.create(receiver.receiveMessages().flatMap(received -> {
        logger.info("{}: lockToken[{}]. lockedUntil[{}]. now[{}]", received.getSequenceNumber(),
            received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now());
        while (lockRenewCount.get() < 4) {
            lockRenewCount.incrementAndGet();
            // Fixed log-message typo: "Curren time" -> "Current time".
            logger.info("Iteration {}: Current time {}.", lockRenewCount.get(), OffsetDateTime.now());
            try {
                TimeUnit.SECONDS.sleep(5);
            } catch (InterruptedException error) {
                logger.error("Error occurred while sleeping: " + error);
                // Restore the interrupt flag so the interruption is not silently swallowed.
                Thread.currentThread().interrupt();
            }
        }
        return receiver.complete(received).thenReturn(received);
    }))
        .assertNext(received -> {
            assertTrue(lockRenewCount.get() > 0);
            messagesPending.decrementAndGet();
        })
        .thenCancel()
        .verify();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Receives one message and abandons it (returning it to the entity).
void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Act & Assert. StepVerifier is lazy: the original chain ended at expectComplete() and
    // never called verify(), so the scenario was never actually executed. verify() added.
    StepVerifier.create(receiver.receiveMessages()
        .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
        .expectComplete()
        .verify();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityWithSessions")
@ParameterizedTest
// Receives and defers a message, then fetches it by sequence number via
// receiveDeferredMessage() and completes it.
void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled);
AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<>();
StepVerifier.create(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(m -> {
received.set(m);
assertMessageEquals(m, messageId, isSessionEnabled);
// NOTE(review): the counter is decremented here AND after the deferred complete below —
// two decrements for one sent message; confirm against the drain logic in afterTest().
messagesPending.decrementAndGet();
}).verifyComplete();
receiver.receiveDeferredMessage(received.get().getSequenceNumber())
.flatMap(m -> receiver.complete(m))
.block(TIMEOUT);
messagesPending.decrementAndGet();
}
/**
* Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityProvider") // NOTE(review): source line was truncated; confirm the provider also supplies DispositionStatus arguments
@ParameterizedTest
// Defers a received message, re-fetches it by sequence number, then applies the disposition
// under test (abandon, dead-letter, or complete) to the deferred message.
void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
receiver.defer(receivedMessage).block(TIMEOUT);
// The deferred message must be retrievable by its original sequence number.
final ServiceBusReceivedMessage receivedDeferredMessage = receiver
.receiveDeferredMessage(receivedMessage.getSequenceNumber())
.block(TIMEOUT);
assertNotNull(receivedDeferredMessage);
assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber());
final Mono<Void> operation;
switch (dispositionStatus) {
case ABANDONED:
operation = receiver.abandon(receivedDeferredMessage);
break;
case SUSPENDED:
operation = receiver.deadLetter(receivedDeferredMessage);
break;
case COMPLETED:
operation = receiver.complete(receivedDeferredMessage);
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.expectComplete()
.verify();
if (dispositionStatus != DispositionStatus.COMPLETED) {
messagesPending.decrementAndGet();
}
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityProvider")
@ParameterizedTest
// Round-trips one application property of each supported primitive type and asserts every
// value survives send/receive unchanged.
void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) {
    final boolean isSessionEnabled = true;
    setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);

    Map<String, Object> sentProperties = messageToSend.getApplicationProperties();
    sentProperties.put("NullProperty", null);
    sentProperties.put("BooleanProperty", true);
    sentProperties.put("ByteProperty", (byte) 1);
    sentProperties.put("ShortProperty", (short) 2);
    sentProperties.put("IntProperty", 3);
    sentProperties.put("LongProperty", 4L);
    sentProperties.put("FloatProperty", 5.5f);
    sentProperties.put("DoubleProperty", 6.6); // was 6.6f — a float mislabeled as a double
    sentProperties.put("CharProperty", 'z');
    sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d"));
    sentProperties.put("StringProperty", "string");

    sendMessage(messageToSend).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);

    StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> {
            messagesPending.decrementAndGet();
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);

            // Every sent property must come back with an equal value under the same key.
            final Map<String, Object> received = receivedMessage.getApplicationProperties();
            assertEquals(sentProperties.size(), received.size());
            for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
                if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
                    assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
                } else {
                    final Object expected = sentEntry.getValue();
                    final Object actual = received.get(sentEntry.getKey());
                    assertEquals(expected, actual, String.format(
                        "Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected,
                        actual));
                }
            }
        })
        .thenCancel()
        .verify();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityProvider")
@ParameterizedTest
// Sets a byte-array session state on a session receiver and verifies getSessionState()
// returns the same bytes.
void setAndGetSessionState(MessagingEntityType entityType) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, true);
final byte[] sessionState = "Finished".getBytes(UTF_8);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage messageToSend = getMessage(messageId, true);
sendMessage(messageToSend).block(Duration.ofSeconds(10));
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true);
// Receive and complete the message first, then set the session state on the live session.
StepVerifier.create(receiver.receiveMessages()
.take(1)
.flatMap(message -> {
logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
message.getSessionId(), message.getLockToken(), message.getLockedUntil());
receiver.complete(message).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
return receiver.setSessionState(sessionState);
}))
.expectComplete()
.verify();
StepVerifier.create(receiver.getSessionState())
.assertNext(state -> {
logger.info("State received: {}", new String(state, UTF_8));
assertArrayEquals(sessionState, state);
})
.verifyComplete();
}
/**
* Verifies that we can receive a message from dead letter queue.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase#messagingEntityProvider")
@ParameterizedTest
void receiveFromDeadLetter(MessagingEntityType entityType) {
final Duration shortWait = Duration.ofSeconds(2);
final boolean isSessionEnabled = false;
final int entityIndex = 0;
setSender(entityType, entityIndex, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final List<ServiceBusReceivedMessage> receivedMessages = new ArrayList<>();
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, entityIndex, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
final ServiceBusReceiverAsyncClient deadLetterReceiver;
switch (entityType) {
case QUEUE:
final String queueName = getQueueName(entityIndex);
assertNotNull(queueName, "'queueName' cannot be null.");
deadLetterReceiver = getBuilder(false).receiver()
.queueName(queueName)
.subQueue(SubQueue.DEAD_LETTER_QUEUE)
.buildAsyncClient();
break;
case SUBSCRIPTION:
final String topicName = getTopicName(entityIndex);
final String subscriptionName = getSubscriptionBaseName();
assertNotNull(topicName, "'topicName' cannot be null.");
assertNotNull(subscriptionName, "'subscriptionName' cannot be null.");
deadLetterReceiver = getBuilder(false).receiver()
.topicName(topicName)
.subscriptionName(subscriptionName)
.subQueue(SubQueue.DEAD_LETTER_QUEUE)
.buildAsyncClient();
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType));
}
try {
StepVerifier.create(deadLetterReceiver.receiveMessages())
.assertNext(serviceBusReceivedMessage -> {
receivedMessages.add(serviceBusReceivedMessage);
assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled);
})
.thenAwait(shortWait)
.thenCancel()
.verify();
} finally {
deadLetterReceiver.close();
}
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void renewMessageLock(MessagingEntityType entityType) {
final boolean isSessionEnabled = false;
setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final Duration maximumDuration = Duration.ofSeconds(35);
final Duration sleepDuration = maximumDuration.plusMillis(500);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final ServiceBusReceivedMessage receivedMessage = sendMessage(message)
.then(receiver.receiveMessages().next())
.block(TIMEOUT);
assertNotNull(receivedMessage);
final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil();
assertNotNull(lockedUntil);
StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration))
.thenAwait(sleepDuration)
.then(() -> {
logger.info("Completing message.");
int numberCompleted = completeMessages(receiver, Collections.singletonList(receivedMessage));
messagesPending.addAndGet(-numberCompleted);
})
.expectComplete()
.verify(Duration.ofMinutes(3));
}
/**
* Verifies that we can receive a message which have different section set (i.e header, footer, annotations,
* application properties etc).
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndValidateProperties(MessagingEntityType entityType) {
final boolean isSessionEnabled = false;
final int totalMessages = 1;
final String subject = "subject";
final Map<String, Object> footer = new HashMap<>();
footer.put("footer-key-1", "footer-value-1");
footer.put("footer-key-2", "footer-value-2");
final Map<String, Object> applicationProperties = new HashMap<>();
applicationProperties.put("ap-key-1", "ap-value-1");
applicationProperties.put("ap-key-2", "ap-value-2");
final Map<String, Object> deliveryAnnotation = new HashMap<>();
deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1");
deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2");
final String messageId = UUID.randomUUID().toString();
final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage(
AmqpMessageBody.fromData(CONTENTS_BYTES));
expectedAmqpProperties.getProperties().setSubject(subject);
expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid");
expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to"));
expectedAmqpProperties.getProperties().setContentType("content-type");
expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id"));
expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to"));
expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60));
expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes());
expectedAmqpProperties.getProperties().setContentEncoding("string");
expectedAmqpProperties.getProperties().setGroupSequence(2L);
expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30));
expectedAmqpProperties.getHeader().setPriority((short) 2);
expectedAmqpProperties.getHeader().setFirstAcquirer(true);
expectedAmqpProperties.getHeader().setDurable(true);
expectedAmqpProperties.getFooter().putAll(footer);
expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation);
expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties);
final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId);
final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage();
amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations());
amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties());
amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations());
amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter());
final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader();
header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer());
header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive());
header.setDurable(expectedAmqpProperties.getHeader().isDurable());
header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount());
header.setPriority(expectedAmqpProperties.getHeader().getPriority());
final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties();
amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo()));
amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding()));
amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()));
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject()));
amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType());
amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId());
amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo());
amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence());
amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId());
amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime());
amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime());
amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId());
setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/)
.assertNext(received -> {
assertNotNull(received.getLockToken());
AmqpAnnotatedMessage actual = received.getRawAmqpMessage();
try {
assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes());
assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority());
assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer());
assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable());
assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId());
assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo());
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType());
assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId());
assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo());
assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond());
assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding());
assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence());
assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond());
assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId());
assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations());
assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations());
assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties());
assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter());
} finally {
logger.info("Completing message.");
receiver.complete(received).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
}
})
.thenCancel()
.verify(Duration.ofMinutes(2));
}
/**
* Verifies we can autocomplete for a queue.
*
* @param entityType Entity Type.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoComplete(MessagingEntityType entityType) {
final Duration shortWait = Duration.ofSeconds(2);
final int index = TestUtils.USE_CASE_AUTO_COMPLETE;
setSender(entityType, index, false);
final int numberOfEvents = 3;
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId);
setReceiver(entityType, index, false);
final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT);
Mono.when(messages.stream().map(this::sendMessage)
.collect(Collectors.toList()))
.block(TIMEOUT);
final ServiceBusReceiverAsyncClient autoCompleteReceiver =
getReceiverBuilder(false, entityType, index, false)
.buildAsyncClient();
try {
StepVerifier.create(autoCompleteReceiver.receiveMessages())
.assertNext(receivedMessage -> {
if (lastMessage != null) {
assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId());
} else {
assertEquals(messageId, receivedMessage.getMessageId());
}
})
.assertNext(context -> {
if (lastMessage == null) {
assertEquals(messageId, context.getMessageId());
}
})
.assertNext(context -> {
if (lastMessage == null) {
assertEquals(messageId, context.getMessageId());
}
})
.thenAwait(shortWait)
.thenCancel()
.verify(TIMEOUT);
} finally {
autoCompleteReceiver.close();
}
final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT);
if (lastMessage == null) {
assertNull(newLastMessage,
String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a"));
} else {
assertNotNull(newLastMessage);
assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber());
}
}
/**
 * Asserts that {@code actualMap} contains at least every entry present in {@code expectedMap}.
 */
private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) {
    assertTrue(actualMap.size() >= expectedMap.size());
    for (Map.Entry<String, Object> expectedEntry : expectedMap.entrySet()) {
        final String key = expectedEntry.getKey();
        assertEquals(expectedEntry.getValue(), actualMap.get(key), "Value is not equal for Key " + key);
    }
}
/**
 * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created.
 * Both clients target the same entity index so the receiver sees what the sender produced.
 */
private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
setSender(entityType, entityIndex, isSessionEnabled);
setReceiver(entityType, entityIndex, isSessionEnabled);
}
/**
 * Creates the receiver for the given entity with auto-complete disabled. When sessions are
 * enabled, blocks to accept the session whose id was generated in beforeTest().
 */
private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
final boolean shareConnection = false;
final boolean useCredentials = false;
if (isSessionEnabled) {
assertNotNull(sessionId, "'sessionId' should have been set.");
sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.disableAutoComplete()
.buildAsyncClient();
// acceptSession blocks until the session lock is acquired.
this.receiver = sessionReceiver.acceptSession(sessionId).block();
} else {
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.disableAutoComplete()
.buildAsyncClient();
}
}
/**
 * Creates the sender client for the given entity; the connection is not shared with other clients.
 */
private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
final boolean shareConnection = false;
final boolean useCredentials = false;
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
}
/**
 * Sends the message and, on success, bumps the pending-message counter and logs the running total.
 */
private Mono<Void> sendMessage(ServiceBusMessage message) {
    return sender.sendMessage(message)
        .doOnSuccess(unused -> {
            final int sentSoFar = messagesPending.incrementAndGet();
            logger.info("Message Id {}. Number sent: {}", message.getMessageId(), sentSoFar);
        });
}
/**
 * Completes every message in the list with the given client, blocking until all settle,
 * and returns how many messages were completed.
 */
private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) {
    final List<Mono<Void>> completions = messages.stream()
        .map(client::complete)
        .collect(Collectors.toList());
    Mono.when(completions).block(TIMEOUT);
    return messages.size();
}
} |
Updated to use flatMap instead. | void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(OPERATION_TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
StepVerifier.create(receiver.receiveMessages()
.concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
.verifyComplete();
} | .concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next()) | void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(OPERATION_TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
StepVerifier.create(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
.verifyComplete();
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
// Count of messages sent but not yet settled; afterTest() relies on it when draining.
private final AtomicInteger messagesPending = new AtomicInteger();
private final boolean isSessionEnabled = false;
// Clients are created per-test by setSender/setReceiver and disposed in afterTest().
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private ServiceBusSessionReceiverAsyncClient sessionReceiver;
// Routes base-class logging through this test class's logger.
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
// Fresh session id per test so session-enabled cases never collide.
sessionId = UUID.randomUUID().toString();
}
@Override
protected void afterTest() {
sharedBuilder = null;
try {
// Dispose all clients created during the test; best-effort cleanup.
dispose(receiver, sender, sessionReceiver);
} catch (Exception e) {
logger.warning("Error occurred when draining queue.", e);
}
}
/**
 * Verifies that we can create multiple transaction using sender and receiver.
 */
@Test
void createMultipleTransactionTest() {
setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);
// A second transaction can be created while the first is still open (neither is committed).
StepVerifier.create(receiver.createTransaction())
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(receiver.createTransaction())
.assertNext(Assertions::assertNotNull)
.verifyComplete();
}
/**
* Verifies that we can create transaction and complete.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
/**
 * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2.
 * receive and settle with transactionContext. 3. commit Rollback this transaction.
 */
@ParameterizedTest
@EnumSource(DispositionStatus.class)
void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
final MessagingEntityType entityType = MessagingEntityType.QUEUE;
setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled);
final String messageId1 = UUID.randomUUID().toString();
final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
final String deadLetterReason = "test reason";
sendMessage(message1).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
// Settle the message with the disposition under test, tying the operation to the transaction.
final Mono<Void> operation;
switch (dispositionStatus) {
case COMPLETED:
operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()));
messagesPending.decrementAndGet();
break;
case ABANDONED:
operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get()));
break;
case SUSPENDED:
DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get())
.setDeadLetterReason(deadLetterReason);
operation = receiver.deadLetter(receivedMessage, deadLetterOptions);
messagesPending.decrementAndGet();
break;
case DEFERRED:
operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get()));
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.verifyComplete();
// Committing makes the transactional settlement permanent.
StepVerifier.create(receiver.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
* Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using
* sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
@Disabled
void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
final boolean shareConnection = true;
final boolean useCredentials = false;
final int entityIndex = 0;
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(sender.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())))
.verifyComplete();
StepVerifier.create(sender.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
* Verifies that we can send and receive two messages.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
final int entityIndex = 0;
final boolean shareConnection = false;
final boolean useCredentials = false;
final Duration shortWait = Duration.ofSeconds(3);
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
if (isSessionEnabled) {
assertNotNull(sessionId, "'sessionId' should have been set.");
this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
this.receiver = sessionReceiver.acceptSession(sessionId).block();
} else {
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
}
StepVerifier.create(receiver.receiveMessages())
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.thenAwait(shortWait)
.thenCancel()
.verify();
}
/**
* Verifies that we can send and receive a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
final int entityIndex = 0;
final boolean shareConnection = false;
final boolean useCredentials = false;
final Duration shortWait = Duration.ofSeconds(3);
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
if (isSessionEnabled) {
assertNotNull(sessionId, "'sessionId' should have been set.");
this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
this.receiver = this.sessionReceiver.acceptSession(sessionId).block();
} else {
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
}
StepVerifier.create(receiver.receiveMessages())
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.thenAwait(shortWait)
.thenCancel()
.verify();
StepVerifier.create(receiver.receiveMessages())
.thenAwait(shortWait)
.thenCancel()
.verify();
}
/**
* Verifies that we can send and peek a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
StepVerifier.create(receiver.peekMessage())
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.verifyComplete();
StepVerifier.create(receiver.receiveMessages().concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.verifyComplete();
}
/**
* Verifies that an empty entity does not error when peeking.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);
final int fromSequenceNumber = 1;
StepVerifier.create(receiver.peekMessageAt(fromSequenceNumber))
.verifyComplete();
}
/**
* Verifies that we can schedule and receive a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final Duration shortDelay = Duration.ofSeconds(4);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2);
sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages()
.concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next()))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
}
/**
* Verifies that we can cancel a scheduled message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10);
final Duration delayDuration = Duration.ofSeconds(3);
final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber);
assertNotNull(sequenceNumber);
Mono.delay(delayDuration)
.then(sender.cancelScheduledMessage(sequenceNumber))
.block(TIMEOUT);
messagesPending.decrementAndGet();
logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages().take(1))
.thenAwait(Duration.ofSeconds(5))
.thenCancel()
.verify();
}
/**
* Verifies that we can send and peek a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
final int entityIndex = 3;
setSender(entityType, entityIndex, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, entityIndex, isSessionEnabled);
final ServiceBusReceivedMessage peekMessage = receiver.peekMessage().block(TIMEOUT);
assertNotNull(peekMessage);
final long sequenceNumber = peekMessage.getSequenceNumber();
try {
StepVerifier.create(receiver.peekMessageAt(sequenceNumber))
.assertNext(m -> {
assertEquals(sequenceNumber, m.getSequenceNumber());
assertMessageEquals(m, messageId, isSessionEnabled);
})
.verifyComplete();
} finally {
StepVerifier.create(receiver.receiveMessages()
.concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage))
.next())
.expectNextCount(1)
.verifyComplete();
messagesPending.decrementAndGet();
}
}
/**
* Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);
final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> {
final Map<String, Object> properties = message.getApplicationProperties();
final Object value = properties.get(MESSAGE_POSITION_ID);
assertTrue(value instanceof Integer, "Did not contain correct position number: " + value);
final int position = (int) value;
assertEquals(index, position);
};
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES);
if (isSessionEnabled) {
messages.forEach(m -> m.setSessionId(sessionId));
}
sender.sendMessages(messages)
.doOnSuccess(aVoid -> {
int number = messagesPending.addAndGet(messages.size());
logger.info("Number of messages sent: {}", number);
})
.block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);
try {
StepVerifier.create(receiver.peekMessages(3))
.assertNext(message -> checkCorrectMessage.accept(message, 0))
.assertNext(message -> checkCorrectMessage.accept(message, 1))
.assertNext(message -> checkCorrectMessage.accept(message, 2))
.verifyComplete();
StepVerifier.create(receiver.peekMessages(4))
.assertNext(message -> checkCorrectMessage.accept(message, 3))
.assertNext(message -> checkCorrectMessage.accept(message, 4))
.assertNext(message -> checkCorrectMessage.accept(message, 5))
.assertNext(message -> checkCorrectMessage.accept(message, 6))
.verifyComplete();
StepVerifier.create(receiver.peekMessage())
.assertNext(message -> checkCorrectMessage.accept(message, 7))
.verifyComplete();
} finally {
AtomicInteger completed = new AtomicInteger();
StepVerifier.create(receiver.receiveMessages().take(messages.size()))
.thenConsumeWhile(receivedMessage -> {
completed.incrementAndGet();
receiver.complete(receivedMessage).block(OPERATION_TIMEOUT);
return completed.get() <= messages.size();
})
.thenCancel()
.verify();
messagesPending.addAndGet(-messages.size());
}
}
/**
 * Verifies that we can peek messages starting at a given sequence number and then drain the entity
 * by receiving and completing them.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessagesFromSequence(MessagingEntityType entityType) {
// Arrange: non-session sender/receiver pair on the entity reserved for this use case.
setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
final int maxMessages = 2;
final int fromSequenceNumber = 1;
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
// Act & Assert: peeking from the sequence number emits exactly the two queued messages.
StepVerifier.create(receiver.peekMessagesAt(maxMessages, fromSequenceNumber))
.expectNextCount(maxMessages)
.verifyComplete();
// Clean-up: receive and complete both messages so the entity is left empty for other tests.
StepVerifier.create(receiver.receiveMessages().take(maxMessages))
.assertNext(receivedMessage -> {
receiver.complete(receivedMessage).block(Duration.ofSeconds(15));
})
.assertNext(receivedMessage -> {
receiver.complete(receivedMessage).block(Duration.ofSeconds(15));
})
.expectComplete()
.verify(TIMEOUT);
}
/**
 * Verifies that peeking at an empty entity completes without emitting any messages or erroring.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
    setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);

    // Nothing was ever sent to this entity, so the peek should simply complete empty.
    final int peekCount = 10;
    final int startingSequenceNumber = 1;
    StepVerifier.create(receiver.peekMessagesAt(peekCount, startingSequenceNumber))
        .verifyComplete();
}
/**
 * Verifies that we can dead-letter a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
// Arrange: send a single message to the entity at index 0.
final int entityIndex = 0;
setSender(entityType, entityIndex, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, entityIndex, isSessionEnabled);
// Act & Assert: dead-letter the first received message; it must match what was sent.
StepVerifier.create(receiver.receiveMessages()
.concatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage))
.next())
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
})
.thenCancel()
.verify();
}
/**
 * Verifies that we can receive a message and complete it.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
// Arrange
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
// Act & Assert: complete the first received message and check it matches the sent one.
StepVerifier.create(receiver.receiveMessages()
.concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage))
.next())
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
// NOTE(review): messagesPending was already decremented inside assertNext above; this second
// decrement looks like a double count for a single message -- confirm which one should remain.
messagesPending.decrementAndGet();
}
/**
 * Verifies that we can renew message lock on a non-session receiver.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndRenewLock(MessagingEntityType entityType) {
// Arrange: send a single message and receive it without settling, capturing the initial lock.
setSender(entityType, TestUtils.USE_CASE_DEFAULT, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false);
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
assertNotNull(receivedMessage.getLockedUntil());
final OffsetDateTime initialLock = receivedMessage.getLockedUntil();
logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock);
// Act & Assert: after waiting a few seconds, renewing the lock must push lockedUntil forward.
try {
StepVerifier.create(Mono.delay(Duration.ofSeconds(7))
.then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage))))
.assertNext(lockedUntil -> {
assertTrue(lockedUntil.isAfter(initialLock),
String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]",
lockedUntil, initialLock));
})
.verifyComplete();
} finally {
// Always settle the message so it is not redelivered after the test.
logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber());
receiver.complete(receivedMessage)
.doOnSuccess(aVoid -> messagesPending.decrementAndGet())
.block(TIMEOUT);
}
}
/**
 * Verifies that the message lock is automatically renewed while the receive pipeline holds on to
 * the message longer than the initial lock duration.
 *
 * Fixes over the previous revision: the log message typo "Curren time" is corrected, and the
 * swallowed {@link InterruptedException} now restores the thread's interrupt status.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    final AtomicInteger lockRenewCount = new AtomicInteger();

    // Arrange: send one message and receive it on an entity with auto lock renewal.
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Act & Assert: hold the message for ~20 seconds (4 x 5s) before completing it, forcing the
    // lock to be renewed while we wait.
    StepVerifier.create(receiver.receiveMessages().concatMap(received -> {
        logger.info("{}: lockToken[{}]. lockedUntil[{}]. now[{}]", received.getSequenceNumber(),
            received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now());
        while (lockRenewCount.get() < 4) {
            lockRenewCount.incrementAndGet();
            logger.info("Iteration {}: Current time {}.", lockRenewCount.get(), OffsetDateTime.now());
            try {
                TimeUnit.SECONDS.sleep(5);
            } catch (InterruptedException error) {
                logger.error("Error occurred while sleeping: " + error);
                // Restore the interrupt status so callers up the stack can observe it.
                Thread.currentThread().interrupt();
            }
        }
        return receiver.complete(received).thenReturn(received);
    }))
        .assertNext(received -> {
            assertTrue(lockRenewCount.get() > 0);
            messagesPending.decrementAndGet();
        })
        .thenCancel()
        .verify();
}
/**
 * Verifies that we can receive a message and abandon it; the abandoned message still matches what
 * was originally sent.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Send a single message to the default entity.
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage toSend = getMessage(messageId, isSessionEnabled);
    sendMessage(toSend).block(TIMEOUT);

    // Receive it, abandon it, and verify the payload.
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    StepVerifier.create(receiver.receiveMessages()
        .concatMap(abandoned -> receiver.abandon(abandoned).thenReturn(abandoned))
        .next())
        .assertNext(abandoned -> {
            assertMessageEquals(abandoned, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();
}
/**
 * Verifies that we can defer a received message and later retrieve and complete it as a deferred
 * message via its sequence number.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) {
// Arrange
setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled);
// Act & Assert: defer the first received message and remember it for the clean-up below.
AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<>();
StepVerifier.create(receiver.receiveMessages()
.concatMap(receivedMessage -> receiver.defer(receivedMessage).thenReturn(receivedMessage))
.next())
.assertNext(m -> {
received.set(m);
assertMessageEquals(m, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
// Clean-up: a deferred message can only be fetched by sequence number; complete it here so the
// entity is left empty.
receiver.receiveDeferredMessage(received.get().getSequenceNumber())
.flatMap(m -> receiver.complete(m))
.block(TIMEOUT);
messagesPending.decrementAndGet();
}
/**
 * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) {
// Arrange: send one message, receive it, and defer it.
setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
receiver.defer(receivedMessage).block(TIMEOUT);
// Act: fetch the deferred message back via its sequence number.
final ServiceBusReceivedMessage receivedDeferredMessage = receiver
.receiveDeferredMessage(receivedMessage.getSequenceNumber())
.block(TIMEOUT);
assertNotNull(receivedDeferredMessage);
assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber());
// Map the parameterized disposition onto the matching settle operation.
final Mono<Void> operation;
switch (dispositionStatus) {
case ABANDONED:
operation = receiver.abandon(receivedDeferredMessage);
break;
case SUSPENDED:
operation = receiver.deadLetter(receivedDeferredMessage);
break;
case COMPLETED:
operation = receiver.complete(receivedDeferredMessage);
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
// Assert: settling the deferred message succeeds.
StepVerifier.create(operation)
.expectComplete()
.verify();
// NOTE(review): the pending count is decremented for every disposition EXCEPT completed --
// confirm this bookkeeping matches how afterTest() drains leftovers.
if (dispositionStatus != DispositionStatus.COMPLETED) {
messagesPending.decrementAndGet();
}
}
/**
 * Verifies that application properties of many different value types survive a send/receive round
 * trip with their values intact.
 *
 * Fix over the previous revision: "DoubleProperty" was populated with the float literal {@code 6.6f},
 * so the double type was exercised twice as float and never as double.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) {
    final boolean isSessionEnabled = true;
    setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);

    // One application property per supported value type.
    Map<String, Object> sentProperties = messageToSend.getApplicationProperties();
    sentProperties.put("NullProperty", null);
    sentProperties.put("BooleanProperty", true);
    sentProperties.put("ByteProperty", (byte) 1);
    sentProperties.put("ShortProperty", (short) 2);
    sentProperties.put("IntProperty", 3);
    sentProperties.put("LongProperty", 4L);
    sentProperties.put("FloatProperty", 5.5f);
    sentProperties.put("DoubleProperty", 6.6);
    sentProperties.put("CharProperty", 'z');
    sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d"));
    sentProperties.put("StringProperty", "string");

    sendMessage(messageToSend).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);

    // Receive the message, complete it, and verify every property round-tripped as an equal value.
    StepVerifier.create(receiver.receiveMessages()
        .concatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage))
        .next())
        .assertNext(receivedMessage -> {
            messagesPending.decrementAndGet();
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);

            final Map<String, Object> received = receivedMessage.getApplicationProperties();
            assertEquals(sentProperties.size(), received.size());
            for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
                if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
                    assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
                } else {
                    final Object expected = sentEntry.getValue();
                    final Object actual = received.get(sentEntry.getKey());
                    assertEquals(expected, actual, String.format(
                        "Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected,
                        actual));
                }
            }
        })
        .thenCancel()
        .verify();
}
/**
 * Verifies that session state can be stored on a session and read back unchanged.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void setAndGetSessionState(MessagingEntityType entityType) {
// Arrange: session-enabled sender/receiver and the state bytes to store on the session.
setSender(entityType, TestUtils.USE_CASE_DEFAULT, true);
final byte[] sessionState = "Finished".getBytes(UTF_8);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage messageToSend = getMessage(messageId, true);
sendMessage(messageToSend).block(Duration.ofSeconds(10));
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true);
// Act: receive and complete the message, then write the session state.
StepVerifier.create(receiver.receiveMessages()
.take(1)
.flatMap(message -> {
logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
message.getSessionId(), message.getLockToken(), message.getLockedUntil());
receiver.complete(message).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
return receiver.setSessionState(sessionState);
}))
.expectComplete()
.verify();
// Assert: reading the state back yields exactly the bytes that were stored.
StepVerifier.create(receiver.getSessionState())
.assertNext(state -> {
logger.info("State received: {}", new String(state, UTF_8));
assertArrayEquals(sessionState, state);
})
.verifyComplete();
}
/**
 * Verifies that we can receive a message from the dead-letter sub-queue after dead-lettering it on
 * the main entity.
 *
 * Fix over the previous revision: the ad-hoc dead-letter receiver was never closed, leaking its
 * AMQP link; it is now closed in a finally block.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveFromDeadLetter(MessagingEntityType entityType) {
    final Duration shortWait = Duration.ofSeconds(2);
    final boolean isSessionEnabled = false;
    final int entityIndex = 0;

    // Arrange: send one message and dead-letter it from the main entity.
    setSender(entityType, entityIndex, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final List<ServiceBusReceivedMessage> receivedMessages = new ArrayList<>();
    sendMessage(message).block(TIMEOUT);
    setReceiver(entityType, entityIndex, isSessionEnabled);
    StepVerifier.create(receiver.receiveMessages()
        .concatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage))
        .next())
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();

    // Build a receiver pointed at the entity's dead-letter sub-queue.
    final ServiceBusReceiverAsyncClient deadLetterReceiver;
    switch (entityType) {
        case QUEUE:
            final String queueName = getQueueName(entityIndex);
            assertNotNull(queueName, "'queueName' cannot be null.");
            deadLetterReceiver = getBuilder(false).receiver()
                .queueName(queueName)
                .subQueue(SubQueue.DEAD_LETTER_QUEUE)
                .buildAsyncClient();
            break;
        case SUBSCRIPTION:
            final String topicName = getTopicName(entityIndex);
            final String subscriptionName = getSubscriptionBaseName();
            assertNotNull(topicName, "'topicName' cannot be null.");
            assertNotNull(subscriptionName, "'subscriptionName' cannot be null.");
            deadLetterReceiver = getBuilder(false).receiver()
                .topicName(topicName)
                .subscriptionName(subscriptionName)
                .subQueue(SubQueue.DEAD_LETTER_QUEUE)
                .buildAsyncClient();
            break;
        default:
            throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType));
    }

    // Act & Assert: the dead-lettered message shows up on the sub-queue; always close the extra
    // receiver so it does not leak past the test.
    try {
        StepVerifier.create(deadLetterReceiver.receiveMessages())
            .assertNext(serviceBusReceivedMessage -> {
                receivedMessages.add(serviceBusReceivedMessage);
                assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled);
            })
            .thenAwait(shortWait)
            .thenCancel()
            .verify();
    } finally {
        deadLetterReceiver.close();
    }
}
/**
 * Verifies that auto-renewal keeps a message lock alive for the requested maximum duration, so the
 * message can still be completed after the initial lock would have expired.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void renewMessageLock(MessagingEntityType entityType) {
final boolean isSessionEnabled = false;
setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
// Renew for 35 seconds and wait slightly longer than that before completing the message.
final Duration maximumDuration = Duration.ofSeconds(35);
final Duration sleepDuration = maximumDuration.plusMillis(500);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final ServiceBusReceivedMessage receivedMessage = sendMessage(message)
.then(receiver.receiveMessages().next())
.block(TIMEOUT);
assertNotNull(receivedMessage);
final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil();
assertNotNull(lockedUntil);
// Act & Assert: completing the message after the wait proves the lock stayed valid.
StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration))
.thenAwait(sleepDuration)
.then(() -> {
logger.info("Completing message.");
int numberCompleted = completeMessages(receiver, Collections.singletonList(receivedMessage));
messagesPending.addAndGet(-numberCompleted);
})
.expectComplete()
.verify(Duration.ofMinutes(3));
}
/**
* Verifies that we can receive a message which have different section set (i.e header, footer, annotations,
* application properties etc).
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndValidateProperties(MessagingEntityType entityType) {
final boolean isSessionEnabled = false;
final int totalMessages = 1;
final String subject = "subject";
final Map<String, Object> footer = new HashMap<>();
footer.put("footer-key-1", "footer-value-1");
footer.put("footer-key-2", "footer-value-2");
final Map<String, Object> applicationProperties = new HashMap<>();
applicationProperties.put("ap-key-1", "ap-value-1");
applicationProperties.put("ap-key-2", "ap-value-2");
final Map<String, Object> deliveryAnnotation = new HashMap<>();
deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1");
deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2");
final String messageId = UUID.randomUUID().toString();
final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage(
AmqpMessageBody.fromData(CONTENTS_BYTES));
expectedAmqpProperties.getProperties().setSubject(subject);
expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid");
expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to"));
expectedAmqpProperties.getProperties().setContentType("content-type");
expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id"));
expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to"));
expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60));
expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes());
expectedAmqpProperties.getProperties().setContentEncoding("string");
expectedAmqpProperties.getProperties().setGroupSequence(2L);
expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30));
expectedAmqpProperties.getHeader().setPriority((short) 2);
expectedAmqpProperties.getHeader().setFirstAcquirer(true);
expectedAmqpProperties.getHeader().setDurable(true);
expectedAmqpProperties.getFooter().putAll(footer);
expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation);
expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties);
final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId);
final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage();
amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations());
amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties());
amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations());
amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter());
final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader();
header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer());
header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive());
header.setDurable(expectedAmqpProperties.getHeader().isDurable());
header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount());
header.setPriority(expectedAmqpProperties.getHeader().getPriority());
final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties();
amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo()));
amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding()));
amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()));
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject()));
amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType());
amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId());
amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo());
amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence());
amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId());
amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime());
amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime());
amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId());
setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/)
.assertNext(received -> {
assertNotNull(received.getLockToken());
AmqpAnnotatedMessage actual = received.getRawAmqpMessage();
try {
assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes());
assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority());
assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer());
assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable());
assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId());
assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo());
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType());
assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId());
assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo());
assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond());
assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding());
assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence());
assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond());
assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId());
assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations());
assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations());
assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties());
assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter());
} finally {
logger.info("Completing message.");
receiver.complete(received).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
}
})
.thenCancel()
.verify(Duration.ofMinutes(2));
}
/**
 * Verifies we can autocomplete for a queue: messages received through a default (auto-complete)
 * receiver are settled without explicit complete() calls.
 *
 * @param entityType Entity Type.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoComplete(MessagingEntityType entityType) {
// Arrange
final Duration shortWait = Duration.ofSeconds(2);
final int index = TestUtils.USE_CASE_AUTO_COMPLETE;
setSender(entityType, index, false);
final int numberOfEvents = 3;
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId);
setReceiver(entityType, index, false);
// Snapshot whatever is already at the head of the entity so the assertions below can cope with
// leftovers from an earlier run.
final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT);
Mono.when(messages.stream().map(this::sendMessage)
.collect(Collectors.toList()))
.block(TIMEOUT);
// Act: receive through a receiver built WITHOUT disableAutoComplete().
final ServiceBusReceiverAsyncClient autoCompleteReceiver =
getReceiverBuilder(false, entityType, index, false)
.buildAsyncClient();
try {
StepVerifier.create(autoCompleteReceiver.receiveMessages())
.assertNext(receivedMessage -> {
if (lastMessage != null) {
assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId());
} else {
assertEquals(messageId, receivedMessage.getMessageId());
}
})
.assertNext(context -> {
if (lastMessage == null) {
assertEquals(messageId, context.getMessageId());
}
})
.assertNext(context -> {
if (lastMessage == null) {
assertEquals(messageId, context.getMessageId());
}
})
.thenAwait(shortWait)
.thenCancel()
.verify(TIMEOUT);
} finally {
autoCompleteReceiver.close();
}
// Assert: everything received above was auto-completed, so the entity head is back to its
// pre-test state (empty, or the same leftover message as before).
final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT);
if (lastMessage == null) {
assertNull(newLastMessage,
String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a"));
} else {
assertNotNull(newLastMessage);
assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber());
}
}
/**
 * Asserts that {@code actualMap} contains at least every key of {@code expectedMap} with an equal
 * value (the actual map may carry extra entries added by the service).
 */
private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) {
    assertTrue(actualMap.size() >= expectedMap.size());
    for (Map.Entry<String, Object> expected : expectedMap.entrySet()) {
        assertEquals(expected.getValue(), actualMap.get(expected.getKey()),
            "Value is not equal for Key " + expected.getKey());
    }
}
/**
 * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created.
 */
private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
setSender(entityType, entityIndex, isSessionEnabled);
setReceiver(entityType, entityIndex, isSessionEnabled);
}
/**
 * Builds {@link #receiver} for the given entity with auto-complete disabled. For session-enabled
 * entities the receiver is obtained by accepting the test's {@code sessionId} on a session
 * receiver, which is kept in {@link #sessionReceiver} for disposal in afterTest().
 */
private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
    final boolean shareConnection = false;
    final boolean useCredentials = false;

    if (!isSessionEnabled) {
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .disableAutoComplete()
            .buildAsyncClient();
        return;
    }

    assertNotNull(sessionId, "'sessionId' should have been set.");
    this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
        .disableAutoComplete()
        .buildAsyncClient();
    this.receiver = sessionReceiver.acceptSession(sessionId).block();
}
/**
 * Builds {@link #sender} for the given entity. Senders in these tests never share a connection and
 * never use AAD credentials.
 */
private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
    final boolean useCredentials = false;
    final boolean shareConnection = false;
    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();
}
/**
 * Sends a single message and, on success, bumps {@link #messagesPending} so afterTest() knows how
 * many messages are still outstanding.
 */
private Mono<Void> sendMessage(ServiceBusMessage message) {
    return sender.sendMessage(message).doOnSuccess(ignored -> {
        final int pending = messagesPending.incrementAndGet();
        logger.info("Message Id {}. Number sent: {}", message.getMessageId(), pending);
    });
}
/**
 * Completes every message in {@code messages} on the given client, blocking until all settle
 * operations finish, and returns the number of messages completed.
 */
private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) {
    final List<Mono<Void>> completions = messages.stream()
        .map(client::complete)
        .collect(Collectors.toList());
    Mono.when(completions).block(TIMEOUT);
    return messages.size();
}
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
// Logger scoped to this integration-test class.
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
// Count of messages sent but not yet settled; incremented by sendMessage() and decremented by
// the individual tests as they settle messages.
private final AtomicInteger messagesPending = new AtomicInteger();
// Transaction tests in this class always run against non-session entities.
private final boolean isSessionEnabled = false;
// Clients are created per-test by the set*() helpers and disposed in afterTest().
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private ServiceBusSessionReceiverAsyncClient sessionReceiver;
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
// Fresh session id per test so session-enabled entities never collide across test runs.
sessionId = UUID.randomUUID().toString();
}
@Override
protected void afterTest() {
sharedBuilder = null;
try {
// Dispose all clients created during the test; a disposal failure is logged rather than
// failing the test itself.
dispose(receiver, sender, sessionReceiver);
} catch (Exception e) {
logger.warning("Error occurred when draining queue.", e);
}
}
/**
 * Verifies that more than one transaction can be created back-to-back with the same receiver.
 */
@Test
void createMultipleTransactionTest() {
    setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);

    // Creating a transaction twice in a row should succeed both times.
    for (int attempt = 0; attempt < 2; attempt++) {
        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
    }
}
/**
* Verifies that we can create transaction and complete.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
/**
 * Verifies that, on a non-session entity: (1) a transaction can be created, (2) a received message
 * can be settled (complete/abandon/dead-letter/defer) within that transaction, and (3) the
 * transaction commits successfully.
 */
@ParameterizedTest
@EnumSource(DispositionStatus.class)
void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
// Arrange: send one message and open a transaction on the receiver.
final MessagingEntityType entityType = MessagingEntityType.QUEUE;
setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled);
final String messageId1 = UUID.randomUUID().toString();
final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
final String deadLetterReason = "test reason";
sendMessage(message1).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
// Build the settle operation matching the parameterized disposition. NOTE(review): the pending
// counter is decremented here, before the operation is actually subscribed below -- confirm
// this is the intended bookkeeping.
final Mono<Void> operation;
switch (dispositionStatus) {
case COMPLETED:
operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()));
messagesPending.decrementAndGet();
break;
case ABANDONED:
operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get()));
break;
case SUSPENDED:
DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get())
.setDeadLetterReason(deadLetterReason);
operation = receiver.deadLetter(receivedMessage, deadLetterOptions);
messagesPending.decrementAndGet();
break;
case DEFERRED:
operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get()));
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
// Act & Assert: settle inside the transaction, then commit it.
StepVerifier.create(operation)
.verifyComplete();
StepVerifier.create(receiver.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
 * Verifies that a transaction can span two clients that share one connection: (1) the sender
 * creates the transaction, (2) the receiver completes a message with that transaction context, and
 * (3) the sender commits the transaction.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
@Disabled
void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
// Arrange: sender and receiver share one AMQP connection so they can share the transaction.
final boolean shareConnection = true;
final boolean useCredentials = false;
final int entityIndex = 0;
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
// Act & Assert: the transaction is created by the sender...
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(sender.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
// ...the message is completed by the receiver inside that transaction...
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())))
.verifyComplete();
// ...and the sender commits the transaction.
StepVerifier.create(sender.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
 * Verifies that we can send and receive two messages with an auto-complete receiver.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
// Arrange: send the same payload twice.
final int entityIndex = 0;
final boolean shareConnection = false;
final boolean useCredentials = false;
final Duration shortWait = Duration.ofSeconds(3);
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
// These receivers are built WITHOUT disableAutoComplete(), so received messages settle
// automatically.
if (isSessionEnabled) {
assertNotNull(sessionId, "'sessionId' should have been set.");
this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
this.receiver = sessionReceiver.acceptSession(sessionId).block();
} else {
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
}
// Act & Assert: both copies arrive and match the sent message id.
StepVerifier.create(receiver.receiveMessages())
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.thenAwait(shortWait)
.thenCancel()
.verify();
}
/**
 * Verifies that we can send and receive a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
final int entityIndex = 0;
final boolean shareConnection = false;
final boolean useCredentials = false;
final Duration shortWait = Duration.ofSeconds(3);
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
if (isSessionEnabled) {
// Session entities require accepting the specific session before receiving.
assertNotNull(sessionId, "'sessionId' should have been set.");
this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
this.receiver = this.sessionReceiver.acceptSession(sessionId).block();
} else {
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.buildAsyncClient();
}
// First subscription observes the message; auto-complete is presumed to settle it — TODO confirm the
// builder default for this receiver.
StepVerifier.create(receiver.receiveMessages())
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.thenAwait(shortWait)
.thenCancel()
.verify();
// Second subscription should see nothing: the message was already settled above.
StepVerifier.create(receiver.receiveMessages())
.thenAwait(shortWait)
.thenCancel()
.verify();
}
/**
 * Verifies that we can send and peek a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
// Peek sees the message; the same message is still receivable afterwards (verified below).
StepVerifier.create(receiver.peekMessage())
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.verifyComplete();
// Receive and complete the message to clean up the entity.
StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.verifyComplete();
}
/**
 * Verifies that an empty entity does not error when peeking.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);
final int fromSequenceNumber = 1;
// Peeking an empty entity should complete with no emissions and no error.
StepVerifier.create(receiver.peekMessageAt(fromSequenceNumber))
.verifyComplete();
}
/**
 * Verifies that we can schedule and receive a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final Duration shortDelay = Duration.ofSeconds(4);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
// Schedule for 2s out; the 4s delay below gives the broker margin to enqueue it before we receive.
final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2);
sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
// Wait past the enqueue time, then receive + complete the single scheduled message.
StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next()))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
}
/**
 * Verifies that we can cancel a scheduled message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
// Schedule far enough out (10s) that the cancel issued after 3s lands before enqueue.
final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10);
final Duration delayDuration = Duration.ofSeconds(3);
final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber);
assertNotNull(sequenceNumber);
Mono.delay(delayDuration)
.then(sender.cancelScheduledMessage(sequenceNumber))
.block(TIMEOUT);
messagesPending.decrementAndGet();
logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
// Nothing should arrive: cancel the subscription after a 5s window with no emissions.
StepVerifier.create(receiver.receiveMessages().take(1))
.thenAwait(Duration.ofSeconds(5))
.thenCancel()
.verify();
}
/**
 * Verifies that we can send and peek a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
final int entityIndex = 3;
setSender(entityType, entityIndex, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, entityIndex, isSessionEnabled);
// First peek discovers the broker-assigned sequence number to peek at explicitly below.
final ServiceBusReceivedMessage peekMessage = receiver.peekMessage().block(TIMEOUT);
assertNotNull(peekMessage);
final long sequenceNumber = peekMessage.getSequenceNumber();
try {
// Peeking at that exact sequence number must return the same message.
StepVerifier.create(receiver.peekMessageAt(sequenceNumber))
.assertNext(m -> {
assertEquals(sequenceNumber, m.getSequenceNumber());
assertMessageEquals(m, messageId, isSessionEnabled);
})
.verifyComplete();
} finally {
// Cleanup: drain and complete the message even if the peek assertions failed.
StepVerifier.create(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
.expectNextCount(1)
.verifyComplete();
messagesPending.decrementAndGet();
}
}
/**
 * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);
// Asserts a peeked message carries the expected zero-based position stamped in its application properties.
final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> {
final Map<String, Object> properties = message.getApplicationProperties();
final Object value = properties.get(MESSAGE_POSITION_ID);
assertTrue(value instanceof Integer, "Did not contain correct position number: " + value);
final int position = (int) value;
assertEquals(index, position);
};
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES);
if (isSessionEnabled) {
messages.forEach(m -> m.setSessionId(sessionId));
}
sender.sendMessages(messages)
.doOnSuccess(aVoid -> {
int number = messagesPending.addAndGet(messages.size());
logger.info("Number of messages sent: {}", number);
})
.block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);
try {
// Consecutive peeks must continue from the receiver's internal cursor:
// positions 0-2, then 3-6, then a single peek at 7.
StepVerifier.create(receiver.peekMessages(3))
.assertNext(message -> checkCorrectMessage.accept(message, 0))
.assertNext(message -> checkCorrectMessage.accept(message, 1))
.assertNext(message -> checkCorrectMessage.accept(message, 2))
.verifyComplete();
StepVerifier.create(receiver.peekMessages(4))
.assertNext(message -> checkCorrectMessage.accept(message, 3))
.assertNext(message -> checkCorrectMessage.accept(message, 4))
.assertNext(message -> checkCorrectMessage.accept(message, 5))
.assertNext(message -> checkCorrectMessage.accept(message, 6))
.verifyComplete();
StepVerifier.create(receiver.peekMessage())
.assertNext(message -> checkCorrectMessage.accept(message, 7))
.verifyComplete();
} finally {
// Cleanup: receive and complete all 10 sent messages regardless of assertion outcome.
AtomicInteger completed = new AtomicInteger();
StepVerifier.create(receiver.receiveMessages().take(messages.size()))
.thenConsumeWhile(receivedMessage -> {
completed.incrementAndGet();
receiver.complete(receivedMessage).block(OPERATION_TIMEOUT);
return completed.get() <= messages.size();
})
.thenCancel()
.verify();
messagesPending.addAndGet(-messages.size());
}
}
/**
 * Verifies that we can send and peek a batch of messages.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessagesFromSequence(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
final int maxMessages = 2;
final int fromSequenceNumber = 1;
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
// Peek a batch starting from an explicit sequence number; both sent messages should be visible.
StepVerifier.create(receiver.peekMessagesAt(maxMessages, fromSequenceNumber))
.expectNextCount(maxMessages)
.verifyComplete();
// Cleanup: receive and complete both messages so the entity is left empty.
StepVerifier.create(receiver.receiveMessages().take(maxMessages))
.assertNext(receivedMessage -> {
receiver.complete(receivedMessage).block(Duration.ofSeconds(15));
})
.assertNext(receivedMessage -> {
receiver.complete(receivedMessage).block(Duration.ofSeconds(15));
})
.expectComplete()
.verify(TIMEOUT);
}
/**
 * Verifies that an empty entity does not error when peeking.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);
final int maxMessages = 10;
final int fromSequenceNumber = 1;
// A batch peek on an empty entity should complete with no emissions and no error.
StepVerifier.create(receiver.peekMessagesAt(maxMessages, fromSequenceNumber))
.verifyComplete();
}
/**
 * Verifies that we can dead-letter a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
final int entityIndex = 0;
setSender(entityType, entityIndex, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, entityIndex, isSessionEnabled);
// Receive one message and dead-letter it inside the pipeline, then assert it was the message sent.
StepVerifier.create(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
})
.thenCancel()
.verify();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
messagesPending.decrementAndGet();
}
/**
 * Verifies that we can renew message lock on a non-session receiver.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndRenewLock(MessagingEntityType entityType) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false);
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
assertNotNull(receivedMessage.getLockedUntil());
// Capture the initial lock expiry so the renewed expiry can be compared against it.
final OffsetDateTime initialLock = receivedMessage.getLockedUntil();
logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock);
try {
// Wait 7s then renew; the new lockedUntil must be strictly later than the initial one.
StepVerifier.create(Mono.delay(Duration.ofSeconds(7))
.then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage))))
.assertNext(lockedUntil -> {
assertTrue(lockedUntil.isAfter(initialLock),
String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]",
lockedUntil, initialLock));
})
.verifyComplete();
} finally {
// Cleanup: settle the message even if the renewal assertion failed.
logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber());
receiver.complete(receivedMessage)
.doOnSuccess(aVoid -> messagesPending.decrementAndGet())
.block(TIMEOUT);
}
}
/**
* Verifies that the lock can be automatically renewed.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
final AtomicInteger lockRenewCount = new AtomicInteger();
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages().flatMap(received -> {
logger.info("{}: lockToken[{}]. lockedUntil[{}]. now[{}]", received.getSequenceNumber(),
received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now());
while (lockRenewCount.get() < 4) {
lockRenewCount.incrementAndGet();
logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now());
try {
TimeUnit.SECONDS.sleep(5);
} catch (InterruptedException error) {
logger.error("Error occurred while sleeping: " + error);
}
}
return receiver.complete(received).thenReturn(received);
}))
.assertNext(received -> {
assertTrue(lockRenewCount.get() > 0);
messagesPending.decrementAndGet();
})
.thenCancel()
.verify();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.expectComplete();
}
// Verifies that a message can be deferred and then retrieved again via its sequence number.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) {
setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled);
// Capture the deferred message so its sequence number can be used for the deferred-receive below.
AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<>();
StepVerifier.create(receiver.receiveMessages()
.flatMap(receivedMessage -> receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(m -> {
received.set(m);
assertMessageEquals(m, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
// The deferred message is only reachable by sequence number; fetch it and complete it to clean up.
receiver.receiveDeferredMessage(received.get().getSequenceNumber())
.flatMap(m -> receiver.complete(m))
.block(TIMEOUT);
messagesPending.decrementAndGet();
}
/**
 * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
assertNotNull(receivedMessage);
// Defer, then fetch the same message back via its sequence number.
receiver.defer(receivedMessage).block(TIMEOUT);
final ServiceBusReceivedMessage receivedDeferredMessage = receiver
.receiveDeferredMessage(receivedMessage.getSequenceNumber())
.block(TIMEOUT);
assertNotNull(receivedDeferredMessage);
assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber());
// Pick the disposition operation under test; each case settles the deferred message differently.
final Mono<Void> operation;
switch (dispositionStatus) {
case ABANDONED:
operation = receiver.abandon(receivedDeferredMessage);
break;
case SUSPENDED:
operation = receiver.deadLetter(receivedDeferredMessage);
break;
case COMPLETED:
operation = receiver.complete(receivedDeferredMessage);
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.expectComplete()
.verify();
// NOTE(review): pending count is decremented for non-COMPLETED dispositions — TODO confirm this matches
// how messagesPending is reconciled elsewhere; COMPLETED appears to be accounted for differently.
if (dispositionStatus != DispositionStatus.COMPLETED) {
messagesPending.decrementAndGet();
}
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) {
final boolean isSessionEnabled = true;
setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);
Map<String, Object> sentProperties = messageToSend.getApplicationProperties();
sentProperties.put("NullProperty", null);
sentProperties.put("BooleanProperty", true);
sentProperties.put("ByteProperty", (byte) 1);
sentProperties.put("ShortProperty", (short) 2);
sentProperties.put("IntProperty", 3);
sentProperties.put("LongProperty", 4L);
sentProperties.put("FloatProperty", 5.5f);
sentProperties.put("DoubleProperty", 6.6f);
sentProperties.put("CharProperty", 'z');
sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d"));
sentProperties.put("StringProperty", "string");
sendMessage(messageToSend).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);
StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(receivedMessage -> {
messagesPending.decrementAndGet();
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
final Map<String, Object> received = receivedMessage.getApplicationProperties();
assertEquals(sentProperties.size(), received.size());
for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
} else {
final Object expected = sentEntry.getValue();
final Object actual = received.get(sentEntry.getKey());
assertEquals(expected, actual, String.format(
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected,
actual));
}
}
})
.thenCancel()
.verify();
}
// Verifies that session state can be set on a session receiver and read back unchanged.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void setAndGetSessionState(MessagingEntityType entityType) {
setSender(entityType, TestUtils.USE_CASE_DEFAULT, true);
final byte[] sessionState = "Finished".getBytes(UTF_8);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage messageToSend = getMessage(messageId, true);
sendMessage(messageToSend).block(Duration.ofSeconds(10));
setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true);
// Receive one message, complete it, then set the session state on the same (still-locked) session.
StepVerifier.create(receiver.receiveMessages()
.take(1)
.flatMap(message -> {
logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
message.getSessionId(), message.getLockToken(), message.getLockedUntil());
receiver.complete(message).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
return receiver.setSessionState(sessionState);
}))
.expectComplete()
.verify();
// The state read back must be byte-for-byte what was written.
StepVerifier.create(receiver.getSessionState())
.assertNext(state -> {
logger.info("State received: {}", new String(state, UTF_8));
assertArrayEquals(sessionState, state);
})
.verifyComplete();
}
/**
 * Verifies that we can receive a message from dead letter queue.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveFromDeadLetter(MessagingEntityType entityType) {
final Duration shortWait = Duration.ofSeconds(2);
final boolean isSessionEnabled = false;
final int entityIndex = 0;
setSender(entityType, entityIndex, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final List<ServiceBusReceivedMessage> receivedMessages = new ArrayList<>();
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, entityIndex, isSessionEnabled);
// Dead-letter the message so it lands in the entity's DLQ sub-queue.
StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}).verifyComplete();
// Build a second receiver pointed at the DEAD_LETTER_QUEUE sub-queue of the same entity.
final ServiceBusReceiverAsyncClient deadLetterReceiver;
switch (entityType) {
case QUEUE:
final String queueName = getQueueName(entityIndex);
assertNotNull(queueName, "'queueName' cannot be null.");
deadLetterReceiver = getBuilder(false).receiver()
.queueName(queueName)
.subQueue(SubQueue.DEAD_LETTER_QUEUE)
.buildAsyncClient();
break;
case SUBSCRIPTION:
final String topicName = getTopicName(entityIndex);
final String subscriptionName = getSubscriptionBaseName();
assertNotNull(topicName, "'topicName' cannot be null.");
assertNotNull(subscriptionName, "'subscriptionName' cannot be null.");
deadLetterReceiver = getBuilder(false).receiver()
.topicName(topicName)
.subscriptionName(subscriptionName)
.subQueue(SubQueue.DEAD_LETTER_QUEUE)
.buildAsyncClient();
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType));
}
try {
// The dead-lettered message must be receivable from the DLQ and match the original payload.
StepVerifier.create(deadLetterReceiver.receiveMessages())
.assertNext(serviceBusReceivedMessage -> {
receivedMessages.add(serviceBusReceivedMessage);
assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled);
})
.thenAwait(shortWait)
.thenCancel()
.verify();
} finally {
// Always release the extra receiver's connection resources.
deadLetterReceiver.close();
}
}
// Verifies auto lock renewal over an explicit maximum duration, then completes the message after the
// original lock would have expired.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void renewMessageLock(MessagingEntityType entityType) {
final boolean isSessionEnabled = false;
setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
// Sleep slightly longer than the renewal window so completion happens after renewal has ended.
final Duration maximumDuration = Duration.ofSeconds(35);
final Duration sleepDuration = maximumDuration.plusMillis(500);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final ServiceBusReceivedMessage receivedMessage = sendMessage(message)
.then(receiver.receiveMessages().next())
.block(TIMEOUT);
assertNotNull(receivedMessage);
final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil();
assertNotNull(lockedUntil);
// Renew for up to maximumDuration; afterwards the message must still be completable.
StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration))
.thenAwait(sleepDuration)
.then(() -> {
logger.info("Completing message.");
int numberCompleted = completeMessages(receiver, Collections.singletonList(receivedMessage));
messagesPending.addAndGet(-numberCompleted);
})
.expectComplete()
.verify(Duration.ofMinutes(3));
}
/**
 * Verifies that we can receive a message which have different section set (i.e header, footer, annotations,
 * application properties etc).
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndValidateProperties(MessagingEntityType entityType) {
final boolean isSessionEnabled = false;
// NOTE(review): totalMessages is unused — the matching .take(totalMessages) is commented out below.
final int totalMessages = 1;
final String subject = "subject";
// Fixture maps for each AMQP section under test.
final Map<String, Object> footer = new HashMap<>();
footer.put("footer-key-1", "footer-value-1");
footer.put("footer-key-2", "footer-value-2");
final Map<String, Object> applicationProperties = new HashMap<>();
applicationProperties.put("ap-key-1", "ap-value-1");
applicationProperties.put("ap-key-2", "ap-value-2");
final Map<String, Object> deliveryAnnotation = new HashMap<>();
deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1");
deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2");
final String messageId = UUID.randomUUID().toString();
// Build the expected AMQP message: every header/properties/footer/annotation field populated so each
// section's round-trip can be asserted.
final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage(
AmqpMessageBody.fromData(CONTENTS_BYTES));
expectedAmqpProperties.getProperties().setSubject(subject);
expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid");
expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to"));
expectedAmqpProperties.getProperties().setContentType("content-type");
expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id"));
expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to"));
expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60));
expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes());
expectedAmqpProperties.getProperties().setContentEncoding("string");
expectedAmqpProperties.getProperties().setGroupSequence(2L);
expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30));
expectedAmqpProperties.getHeader().setPriority((short) 2);
expectedAmqpProperties.getHeader().setFirstAcquirer(true);
expectedAmqpProperties.getHeader().setDurable(true);
expectedAmqpProperties.getFooter().putAll(footer);
expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation);
expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties);
// Mirror every expected field onto the outgoing message's raw AMQP sections.
final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId);
final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage();
amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations());
amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties());
amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations());
amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter());
final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader();
header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer());
header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive());
header.setDurable(expectedAmqpProperties.getHeader().isDurable());
header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount());
header.setPriority(expectedAmqpProperties.getHeader().getPriority());
final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties();
amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo()));
amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding()));
amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()));
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject()));
amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType());
amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId());
amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo());
amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence());
amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId());
amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime());
amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime());
amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId());
setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);
// Receive and compare each AMQP section field-by-field against the expected message.
// Timestamps compare at epoch-second granularity — sub-second precision is presumably not preserved
// over the wire; TODO confirm.
StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/)
.assertNext(received -> {
assertNotNull(received.getLockToken());
AmqpAnnotatedMessage actual = received.getRawAmqpMessage();
try {
assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes());
assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority());
assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer());
assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable());
assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId());
assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo());
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType());
assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId());
assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo());
assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond());
assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding());
assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence());
assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond());
assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId());
assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations());
assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations());
assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties());
assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter());
} finally {
// Cleanup: settle the message even when an assertion above failed.
logger.info("Completing message.");
receiver.complete(received).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
}
})
.thenCancel()
.verify(Duration.ofMinutes(2));
}
/**
 * Verifies we can autocomplete for a queue.
 *
 * @param entityType Entity Type.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoComplete(MessagingEntityType entityType) {
final Duration shortWait = Duration.ofSeconds(2);
final int index = TestUtils.USE_CASE_AUTO_COMPLETE;
setSender(entityType, index, false);
final int numberOfEvents = 3;
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId);
setReceiver(entityType, index, false);
// Snapshot whatever message (if any) is already at the head of the entity; it is used below to tell
// pre-existing messages apart from the ones this test sends.
final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT);
Mono.when(messages.stream().map(this::sendMessage)
.collect(Collectors.toList()))
.block(TIMEOUT);
// A separate receiver built with builder defaults — auto-complete is presumed enabled by default;
// TODO confirm against the builder documentation.
final ServiceBusReceiverAsyncClient autoCompleteReceiver =
getReceiverBuilder(false, entityType, index, false)
.buildAsyncClient();
try {
StepVerifier.create(autoCompleteReceiver.receiveMessages())
.assertNext(receivedMessage -> {
if (lastMessage != null) {
assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId());
} else {
assertEquals(messageId, receivedMessage.getMessageId());
}
})
.assertNext(context -> {
if (lastMessage == null) {
assertEquals(messageId, context.getMessageId());
}
})
.assertNext(context -> {
if (lastMessage == null) {
assertEquals(messageId, context.getMessageId());
}
})
.thenAwait(shortWait)
.thenCancel()
.verify(TIMEOUT);
} finally {
autoCompleteReceiver.close();
}
// After auto-complete settled everything this test sent, a fresh peek should find either nothing or
// only the pre-existing head message captured above.
final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT);
if (lastMessage == null) {
assertNull(newLastMessage,
String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a"));
} else {
assertNotNull(newLastMessage);
assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber());
}
}
/**
* Asserts the length and values with in the map.
*/
private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) {
assertTrue(actualMap.size() >= expectedMap.size());
for (String key : expectedMap.keySet()) {
assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key);
}
}
/**
* Sets the sender and receiver. If session is enabled, then a single-named session receiver is created.
*/
private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
setSender(entityType, entityIndex, isSessionEnabled);
setReceiver(entityType, entityIndex, isSessionEnabled);
}
private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
final boolean shareConnection = false;
final boolean useCredentials = false;
if (isSessionEnabled) {
assertNotNull(sessionId, "'sessionId' should have been set.");
sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.disableAutoComplete()
.buildAsyncClient();
this.receiver = sessionReceiver.acceptSession(sessionId).block();
} else {
this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
.disableAutoComplete()
.buildAsyncClient();
}
}
private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
final boolean shareConnection = false;
final boolean useCredentials = false;
this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
.buildAsyncClient();
}
private Mono<Void> sendMessage(ServiceBusMessage message) {
return sender.sendMessage(message).doOnSuccess(aVoid -> {
int number = messagesPending.incrementAndGet();
logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number);
});
}
private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) {
Mono.when(messages.stream().map(e -> client.complete(e))
.collect(Collectors.toList()))
.block(TIMEOUT);
return messages.size();
}
} |
Did `null` or `""` cause an exception or is this a performance enhancement? | private static String sanitizeLogMessageInput(String logMessage) {
if (CoreUtils.isNullOrEmpty(logMessage)) {
return logMessage;
}
return CRLF_PATTERN.matcher(logMessage).replaceAll("");
} | if (CoreUtils.isNullOrEmpty(logMessage)) { | private static String sanitizeLogMessageInput(String logMessage) {
if (CoreUtils.isNullOrEmpty(logMessage)) {
return logMessage;
}
return CRLF_PATTERN.matcher(logMessage).replaceAll("");
} | class name using the {@link LoggerFactory} | class name using the {@link LoggerFactory} |
If both are set, do we want to throw or prioritize `tokenCredential`? | private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.credential != null) {
return new HmacAuthenticationPolicy(this.credential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
} | new IllegalArgumentException("Missing credential information while building a client.")); | private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.accessKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.accessKeyCredential != null) {
return new HmacAuthenticationPolicy(this.accessKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
} | class CommunicationIdentityClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_ADMINISTRATION_PROPERTIES =
"azure-communication-administration.properties";
private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class);
private String endpoint;
private CommunicationClientCredential credential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_ADMINISTRATION_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationIdentityClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set credential to use
*
* @param accessKey access key for initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder accessKey(String accessKey) {
Objects.requireNonNull(accessKey, "'accessKey' cannot be null.");
this.credential = new CommunicationClientCredential(accessKey);
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
ConnectionString connectionStringObject = new ConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.accessKey(accessKey);
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityAsyncClient instance
*/
public CommunicationIdentityAsyncClient buildAsyncClient() {
return new CommunicationIdentityAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityClient instance
*/
public CommunicationIdentityClient buildClient() {
return new CommunicationIdentityClient(buildAsyncClient());
}
private CommunicationIdentityClientImpl createServiceImpl() {
Objects.requireNonNull(endpoint);
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
policies.add(authorizationPolicy);
applyRequiredPolicies(policies);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration));
policies.add(new RetryPolicy());
policies.add(new CookiePolicy());
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationIdentityClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_ADMINISTRATION_PROPERTIES =
"azure-communication-administration.properties";
private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class);
private String endpoint;
private CommunicationClientCredential accessKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_ADMINISTRATION_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationIdentityClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set credential to use
*
* @param accessKey access key for initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder accessKey(String accessKey) {
Objects.requireNonNull(accessKey, "'accessKey' cannot be null.");
this.accessKeyCredential = new CommunicationClientCredential(accessKey);
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
ConnectionString connectionStringObject = new ConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.accessKey(accessKey);
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityAsyncClient instance
*/
public CommunicationIdentityAsyncClient buildAsyncClient() {
return new CommunicationIdentityAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityClient instance
*/
public CommunicationIdentityClient buildClient() {
return new CommunicationIdentityClient(buildAsyncClient());
}
private CommunicationIdentityClientImpl createServiceImpl() {
Objects.requireNonNull(endpoint);
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
policies.add(authorizationPolicy);
applyRequiredPolicies(policies);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration));
policies.add(new RetryPolicy());
policies.add(new CookiePolicy());
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
nit: `else` should be in the previous line along with `}` | protected CommunicationIdentityClientBuilder getCommunicationIdentityClientBuilderUsingManagedIdentity(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.endpoint(ENDPOINT)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.PLAYBACK) {
builder.credential(new FakeCredentials());
}
else {
builder.credential(new DefaultAzureCredentialBuilder().build());
}
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
} | else { | protected CommunicationIdentityClientBuilder getCommunicationIdentityClientBuilderUsingManagedIdentity(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.endpoint(ENDPOINT)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.PLAYBACK) {
builder.credential(new FakeCredentials());
} else {
builder.credential(new DefaultAzureCredentialBuilder().build());
}
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
} | class CommunicationIdentityClientTestBase extends TestBase {
protected static final TestMode TEST_MODE = initializeTestMode();
protected static final String ENDPOINT = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ENDPOINT", "https:
protected static final String ACCESSKEYRAW = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c";
protected static final String ACCESSKEYENCODED = Base64.getEncoder().encodeToString(ACCESSKEYRAW.getBytes());
protected static final String ACCESSKEY = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ACCESS_TOKEN", ACCESSKEYENCODED);
protected static final String CONNECTION_STRING = Configuration.getGlobalConfiguration()
.get("COMMUNICATION_CONNECTION_STRING", "endpoint=https:
protected CommunicationIdentityClientBuilder getCommunicationIdentityClient(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder.endpoint(ENDPOINT)
.accessKey(ACCESSKEY)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
protected CommunicationIdentityClientBuilder getCommunicationIdentityClientUsingConnectionString(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.connectionString(CONNECTION_STRING)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
private static TestMode initializeTestMode() {
ClientLogger logger = new ClientLogger(CommunicationIdentityClientTestBase.class);
String azureTestMode = Configuration.getGlobalConfiguration().get("AZURE_TEST_MODE");
if (azureTestMode != null) {
System.out.println("azureTestMode: " + azureTestMode);
try {
return TestMode.valueOf(azureTestMode.toUpperCase(Locale.US));
} catch (IllegalArgumentException var3) {
logger.error("Could not parse '{}' into TestEnum. Using 'Playback' mode.", azureTestMode);
return TestMode.PLAYBACK;
}
} else {
logger.info("Environment variable '{}' has not been set yet. Using 'Playback' mode.", "AZURE_TEST_MODE");
return TestMode.PLAYBACK;
}
}
static class FakeCredentials implements TokenCredential {
@Override
public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) {
return Mono.just(new AccessToken("someFakeToken", OffsetDateTime.MAX));
}
}
} | class CommunicationIdentityClientTestBase extends TestBase {
protected static final TestMode TEST_MODE = initializeTestMode();
protected static final String ENDPOINT = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ENDPOINT", "https:
protected static final String ACCESSKEYRAW = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c";
protected static final String ACCESSKEYENCODED = Base64.getEncoder().encodeToString(ACCESSKEYRAW.getBytes());
protected static final String ACCESSKEY = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ACCESS_TOKEN", ACCESSKEYENCODED);
protected static final String CONNECTION_STRING = Configuration.getGlobalConfiguration()
.get("COMMUNICATION_CONNECTION_STRING", "endpoint=https:
protected CommunicationIdentityClientBuilder getCommunicationIdentityClient(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder.endpoint(ENDPOINT)
.accessKey(ACCESSKEY)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
protected CommunicationIdentityClientBuilder getCommunicationIdentityClientUsingConnectionString(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.connectionString(CONNECTION_STRING)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
private static TestMode initializeTestMode() {
ClientLogger logger = new ClientLogger(CommunicationIdentityClientTestBase.class);
String azureTestMode = Configuration.getGlobalConfiguration().get("AZURE_TEST_MODE");
if (azureTestMode != null) {
System.out.println("azureTestMode: " + azureTestMode);
try {
return TestMode.valueOf(azureTestMode.toUpperCase(Locale.US));
} catch (IllegalArgumentException var3) {
logger.error("Could not parse '{}' into TestEnum. Using 'Playback' mode.", azureTestMode);
return TestMode.PLAYBACK;
}
} else {
logger.info("Environment variable '{}' has not been set yet. Using 'Playback' mode.", "AZURE_TEST_MODE");
return TestMode.PLAYBACK;
}
}
static class FakeCredentials implements TokenCredential {
@Override
public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) {
return Mono.just(new AccessToken("someFakeToken", OffsetDateTime.MAX));
}
}
} |
If both are set, I think we should prioritize tokenCredential as it is now, right? It makes sense to use that one as our default. | private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.credential != null) {
return new HmacAuthenticationPolicy(this.credential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
} | new IllegalArgumentException("Missing credential information while building a client.")); | private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.accessKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.accessKeyCredential != null) {
return new HmacAuthenticationPolicy(this.accessKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
} | class CommunicationIdentityClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_ADMINISTRATION_PROPERTIES =
"azure-communication-administration.properties";
private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class);
private String endpoint;
private CommunicationClientCredential credential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_ADMINISTRATION_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationIdentityClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set credential to use
*
* @param accessKey access key for initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder accessKey(String accessKey) {
Objects.requireNonNull(accessKey, "'accessKey' cannot be null.");
this.credential = new CommunicationClientCredential(accessKey);
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
ConnectionString connectionStringObject = new ConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.accessKey(accessKey);
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityAsyncClient instance
*/
public CommunicationIdentityAsyncClient buildAsyncClient() {
return new CommunicationIdentityAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityClient instance
*/
public CommunicationIdentityClient buildClient() {
return new CommunicationIdentityClient(buildAsyncClient());
}
private CommunicationIdentityClientImpl createServiceImpl() {
Objects.requireNonNull(endpoint);
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
policies.add(authorizationPolicy);
applyRequiredPolicies(policies);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration));
policies.add(new RetryPolicy());
policies.add(new CookiePolicy());
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationIdentityClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_ADMINISTRATION_PROPERTIES =
"azure-communication-administration.properties";
private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class);
private String endpoint;
private CommunicationClientCredential accessKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_ADMINISTRATION_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationIdentityClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set credential to use
*
* @param accessKey access key for initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder accessKey(String accessKey) {
Objects.requireNonNull(accessKey, "'accessKey' cannot be null.");
this.accessKeyCredential = new CommunicationClientCredential(accessKey);
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
ConnectionString connectionStringObject = new ConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.accessKey(accessKey);
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityAsyncClient instance
*/
public CommunicationIdentityAsyncClient buildAsyncClient() {
return new CommunicationIdentityAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityClient instance
*/
public CommunicationIdentityClient buildClient() {
return new CommunicationIdentityClient(buildAsyncClient());
}
private CommunicationIdentityClientImpl createServiceImpl() {
Objects.requireNonNull(endpoint);
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
policies.add(authorizationPolicy);
applyRequiredPolicies(policies);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration));
policies.add(new RetryPolicy());
policies.add(new CookiePolicy());
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
Instead of implementing a `FakeCredential` you can just use the `AzureKeyCredential` and delete your class. ```suggestion builder.credential(new AzureKeyCredential("invalid key")); ``` | protected CommunicationIdentityClientBuilder getCommunicationIdentityClientBuilderUsingManagedIdentity(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.endpoint(ENDPOINT)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.PLAYBACK) {
builder.credential(new FakeCredentials());
} else {
builder.credential(new DefaultAzureCredentialBuilder().build());
}
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
} | builder.credential(new FakeCredentials()); | protected CommunicationIdentityClientBuilder getCommunicationIdentityClientBuilderUsingManagedIdentity(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.endpoint(ENDPOINT)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.PLAYBACK) {
builder.credential(new FakeCredentials());
} else {
builder.credential(new DefaultAzureCredentialBuilder().build());
}
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
} | class CommunicationIdentityClientTestBase extends TestBase {
protected static final TestMode TEST_MODE = initializeTestMode();
protected static final String ENDPOINT = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ENDPOINT", "https:
protected static final String ACCESSKEYRAW = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c";
protected static final String ACCESSKEYENCODED = Base64.getEncoder().encodeToString(ACCESSKEYRAW.getBytes());
protected static final String ACCESSKEY = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ACCESS_TOKEN", ACCESSKEYENCODED);
protected static final String CONNECTION_STRING = Configuration.getGlobalConfiguration()
.get("COMMUNICATION_CONNECTION_STRING", "endpoint=https:
protected CommunicationIdentityClientBuilder getCommunicationIdentityClient(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder.endpoint(ENDPOINT)
.accessKey(ACCESSKEY)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
protected CommunicationIdentityClientBuilder getCommunicationIdentityClientUsingConnectionString(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.connectionString(CONNECTION_STRING)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
private static TestMode initializeTestMode() {
ClientLogger logger = new ClientLogger(CommunicationIdentityClientTestBase.class);
String azureTestMode = Configuration.getGlobalConfiguration().get("AZURE_TEST_MODE");
if (azureTestMode != null) {
System.out.println("azureTestMode: " + azureTestMode);
try {
return TestMode.valueOf(azureTestMode.toUpperCase(Locale.US));
} catch (IllegalArgumentException var3) {
logger.error("Could not parse '{}' into TestEnum. Using 'Playback' mode.", azureTestMode);
return TestMode.PLAYBACK;
}
} else {
logger.info("Environment variable '{}' has not been set yet. Using 'Playback' mode.", "AZURE_TEST_MODE");
return TestMode.PLAYBACK;
}
}
static class FakeCredentials implements TokenCredential {
@Override
public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) {
return Mono.just(new AccessToken("someFakeToken", OffsetDateTime.MAX));
}
}
} | class CommunicationIdentityClientTestBase extends TestBase {
protected static final TestMode TEST_MODE = initializeTestMode();
protected static final String ENDPOINT = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ENDPOINT", "https:
protected static final String ACCESSKEYRAW = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c";
protected static final String ACCESSKEYENCODED = Base64.getEncoder().encodeToString(ACCESSKEYRAW.getBytes());
protected static final String ACCESSKEY = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ACCESS_TOKEN", ACCESSKEYENCODED);
protected static final String CONNECTION_STRING = Configuration.getGlobalConfiguration()
.get("COMMUNICATION_CONNECTION_STRING", "endpoint=https:
protected CommunicationIdentityClientBuilder getCommunicationIdentityClient(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder.endpoint(ENDPOINT)
.accessKey(ACCESSKEY)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
protected CommunicationIdentityClientBuilder getCommunicationIdentityClientUsingConnectionString(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.connectionString(CONNECTION_STRING)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
private static TestMode initializeTestMode() {
ClientLogger logger = new ClientLogger(CommunicationIdentityClientTestBase.class);
String azureTestMode = Configuration.getGlobalConfiguration().get("AZURE_TEST_MODE");
if (azureTestMode != null) {
System.out.println("azureTestMode: " + azureTestMode);
try {
return TestMode.valueOf(azureTestMode.toUpperCase(Locale.US));
} catch (IllegalArgumentException var3) {
logger.error("Could not parse '{}' into TestEnum. Using 'Playback' mode.", azureTestMode);
return TestMode.PLAYBACK;
}
} else {
logger.info("Environment variable '{}' has not been set yet. Using 'Playback' mode.", "AZURE_TEST_MODE");
return TestMode.PLAYBACK;
}
}
static class FakeCredentials implements TokenCredential {
@Override
public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) {
return Mono.just(new AccessToken("someFakeToken", OffsetDateTime.MAX));
}
}
} |
But AzureKeyCredential doesn't implement TokenCredential interface | protected CommunicationIdentityClientBuilder getCommunicationIdentityClientBuilderUsingManagedIdentity(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.endpoint(ENDPOINT)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.PLAYBACK) {
builder.credential(new FakeCredentials());
} else {
builder.credential(new DefaultAzureCredentialBuilder().build());
}
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
} | builder.credential(new FakeCredentials()); | protected CommunicationIdentityClientBuilder getCommunicationIdentityClientBuilderUsingManagedIdentity(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.endpoint(ENDPOINT)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.PLAYBACK) {
builder.credential(new FakeCredentials());
} else {
builder.credential(new DefaultAzureCredentialBuilder().build());
}
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
} | class CommunicationIdentityClientTestBase extends TestBase {
protected static final TestMode TEST_MODE = initializeTestMode();
protected static final String ENDPOINT = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ENDPOINT", "https:
protected static final String ACCESSKEYRAW = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c";
protected static final String ACCESSKEYENCODED = Base64.getEncoder().encodeToString(ACCESSKEYRAW.getBytes());
protected static final String ACCESSKEY = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ACCESS_TOKEN", ACCESSKEYENCODED);
protected static final String CONNECTION_STRING = Configuration.getGlobalConfiguration()
.get("COMMUNICATION_CONNECTION_STRING", "endpoint=https:
protected CommunicationIdentityClientBuilder getCommunicationIdentityClient(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder.endpoint(ENDPOINT)
.accessKey(ACCESSKEY)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
protected CommunicationIdentityClientBuilder getCommunicationIdentityClientUsingConnectionString(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.connectionString(CONNECTION_STRING)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
private static TestMode initializeTestMode() {
ClientLogger logger = new ClientLogger(CommunicationIdentityClientTestBase.class);
String azureTestMode = Configuration.getGlobalConfiguration().get("AZURE_TEST_MODE");
if (azureTestMode != null) {
System.out.println("azureTestMode: " + azureTestMode);
try {
return TestMode.valueOf(azureTestMode.toUpperCase(Locale.US));
} catch (IllegalArgumentException var3) {
logger.error("Could not parse '{}' into TestEnum. Using 'Playback' mode.", azureTestMode);
return TestMode.PLAYBACK;
}
} else {
logger.info("Environment variable '{}' has not been set yet. Using 'Playback' mode.", "AZURE_TEST_MODE");
return TestMode.PLAYBACK;
}
}
static class FakeCredentials implements TokenCredential {
@Override
public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) {
return Mono.just(new AccessToken("someFakeToken", OffsetDateTime.MAX));
}
}
} | class CommunicationIdentityClientTestBase extends TestBase {
protected static final TestMode TEST_MODE = initializeTestMode();
protected static final String ENDPOINT = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ENDPOINT", "https:
protected static final String ACCESSKEYRAW = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c";
protected static final String ACCESSKEYENCODED = Base64.getEncoder().encodeToString(ACCESSKEYRAW.getBytes());
protected static final String ACCESSKEY = Configuration.getGlobalConfiguration()
.get("ADMINISTRATION_SERVICE_ACCESS_TOKEN", ACCESSKEYENCODED);
protected static final String CONNECTION_STRING = Configuration.getGlobalConfiguration()
.get("COMMUNICATION_CONNECTION_STRING", "endpoint=https:
protected CommunicationIdentityClientBuilder getCommunicationIdentityClient(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder.endpoint(ENDPOINT)
.accessKey(ACCESSKEY)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
protected CommunicationIdentityClientBuilder getCommunicationIdentityClientUsingConnectionString(HttpClient httpClient) {
CommunicationIdentityClientBuilder builder = new CommunicationIdentityClientBuilder();
builder
.connectionString(CONNECTION_STRING)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient);
if (getTestMode() == TestMode.RECORD) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
private static TestMode initializeTestMode() {
ClientLogger logger = new ClientLogger(CommunicationIdentityClientTestBase.class);
String azureTestMode = Configuration.getGlobalConfiguration().get("AZURE_TEST_MODE");
if (azureTestMode != null) {
System.out.println("azureTestMode: " + azureTestMode);
try {
return TestMode.valueOf(azureTestMode.toUpperCase(Locale.US));
} catch (IllegalArgumentException var3) {
logger.error("Could not parse '{}' into TestEnum. Using 'Playback' mode.", azureTestMode);
return TestMode.PLAYBACK;
}
} else {
logger.info("Environment variable '{}' has not been set yet. Using 'Playback' mode.", "AZURE_TEST_MODE");
return TestMode.PLAYBACK;
}
}
static class FakeCredentials implements TokenCredential {
@Override
public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) {
return Mono.just(new AccessToken("someFakeToken", OffsetDateTime.MAX));
}
}
} |
We should not change the value of `accessKeyCredential` the user has already set. This will silently set the value to null and the user will not know about it - logs are not always noticed. I would rather do the validation at the very end when `build*Client` is called. Check if both are set and then throw an exception. | public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
if (accessKeyCredential != null) {
logger.info("'accessKey' is being replaced with 'credential', only one authorization method can be used.");
this.accessKeyCredential = null;
}
return this;
} | } | public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
} | class CommunicationIdentityClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_ADMINISTRATION_PROPERTIES =
"azure-communication-administration.properties";
private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class);
private String endpoint;
private CommunicationClientCredential accessKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_ADMINISTRATION_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationIdentityClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
/**
* Set credential to use
*
* @param accessKey access key for initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder accessKey(String accessKey) {
Objects.requireNonNull(accessKey, "'accessKey' cannot be null.");
this.accessKeyCredential = new CommunicationClientCredential(accessKey);
if (tokenCredential != null) {
logger.info("'credential' is being replaced with 'accessKey', only one authorization method can be used.");
this.tokenCredential = null;
}
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
ConnectionString connectionStringObject = new ConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.accessKey(accessKey);
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityAsyncClient instance
*/
public CommunicationIdentityAsyncClient buildAsyncClient() {
return new CommunicationIdentityAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityClient instance
*/
public CommunicationIdentityClient buildClient() {
return new CommunicationIdentityClient(buildAsyncClient());
}
private CommunicationIdentityClientImpl createServiceImpl() {
Objects.requireNonNull(endpoint);
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.accessKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.accessKeyCredential != null) {
return new HmacAuthenticationPolicy(this.accessKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
policies.add(authorizationPolicy);
applyRequiredPolicies(policies);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration));
policies.add(new RetryPolicy());
policies.add(new CookiePolicy());
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationIdentityClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_ADMINISTRATION_PROPERTIES =
"azure-communication-administration.properties";
private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class);
private String endpoint;
private CommunicationClientCredential accessKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_ADMINISTRATION_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationIdentityClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
/**
* Set credential to use
*
* @param accessKey access key for initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder accessKey(String accessKey) {
Objects.requireNonNull(accessKey, "'accessKey' cannot be null.");
this.accessKeyCredential = new CommunicationClientCredential(accessKey);
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
ConnectionString connectionStringObject = new ConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.accessKey(accessKey);
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityAsyncClient instance
*/
public CommunicationIdentityAsyncClient buildAsyncClient() {
return new CommunicationIdentityAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityClient instance
*/
public CommunicationIdentityClient buildClient() {
return new CommunicationIdentityClient(buildAsyncClient());
}
private CommunicationIdentityClientImpl createServiceImpl() {
Objects.requireNonNull(endpoint);
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.accessKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.accessKeyCredential != null) {
return new HmacAuthenticationPolicy(this.accessKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
policies.add(authorizationPolicy);
applyRequiredPolicies(policies);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration));
policies.add(new RetryPolicy());
policies.add(new CookiePolicy());
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
This is good! Since we have this check here, we don't need to do any validation in the setter methods above. | private CommunicationIdentityClientImpl createServiceImpl() {
Objects.requireNonNull(endpoint);
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
} | private CommunicationIdentityClientImpl createServiceImpl() {
Objects.requireNonNull(endpoint);
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
} | class CommunicationIdentityClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_ADMINISTRATION_PROPERTIES =
"azure-communication-administration.properties";
private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class);
private String endpoint;
private CommunicationClientCredential accessKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_ADMINISTRATION_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationIdentityClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
if (accessKeyCredential != null) {
logger.info("'accessKey' is being replaced with 'credential', only one authorization method can be used.");
this.accessKeyCredential = null;
}
return this;
}
/**
* Set credential to use
*
* @param accessKey access key for initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder accessKey(String accessKey) {
Objects.requireNonNull(accessKey, "'accessKey' cannot be null.");
this.accessKeyCredential = new CommunicationClientCredential(accessKey);
if (tokenCredential != null) {
logger.info("'credential' is being replaced with 'accessKey', only one authorization method can be used.");
this.tokenCredential = null;
}
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
ConnectionString connectionStringObject = new ConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.accessKey(accessKey);
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityAsyncClient instance
*/
public CommunicationIdentityAsyncClient buildAsyncClient() {
return new CommunicationIdentityAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityClient instance
*/
public CommunicationIdentityClient buildClient() {
return new CommunicationIdentityClient(buildAsyncClient());
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.accessKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.accessKeyCredential != null) {
return new HmacAuthenticationPolicy(this.accessKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
policies.add(authorizationPolicy);
applyRequiredPolicies(policies);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration));
policies.add(new RetryPolicy());
policies.add(new CookiePolicy());
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationIdentityClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_ADMINISTRATION_PROPERTIES =
"azure-communication-administration.properties";
private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class);
private String endpoint;
private CommunicationClientCredential accessKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_ADMINISTRATION_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationIdentityClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set credential to use
*
* @param accessKey access key for initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder accessKey(String accessKey) {
Objects.requireNonNull(accessKey, "'accessKey' cannot be null.");
this.accessKeyCredential = new CommunicationClientCredential(accessKey);
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
ConnectionString connectionStringObject = new ConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.accessKey(accessKey);
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationIdentityClientBuilder
*/
public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationIdentityClientBuilder object
*/
public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityAsyncClient instance
*/
public CommunicationIdentityAsyncClient buildAsyncClient() {
return new CommunicationIdentityAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationIdentityClient instance
*/
public CommunicationIdentityClient buildClient() {
return new CommunicationIdentityClient(buildAsyncClient());
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.accessKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.accessKeyCredential != null) {
return new HmacAuthenticationPolicy(this.accessKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
policies.add(authorizationPolicy);
applyRequiredPolicies(policies);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
// Appends the policies every client pipeline must carry, in their required order:
// user agent, retry, cookie handling, then HTTP logging.
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
String sdkName = properties.getOrDefault(SDK_NAME, "UnknownName");
String sdkVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
UserAgentPolicy userAgentPolicy =
new UserAgentPolicy(httpLogOptions.getApplicationId(), sdkName, sdkVersion, configuration);
policies.add(userAgentPolicy);
policies.add(new RetryPolicy());
policies.add(new CookiePolicy());
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | |
nit: 20 mins seems too much, 5 minutes should be enough? | public static void main(String[] args) {
TextAnalyticsAsyncClient client =
new TextAnalyticsClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("{endpoint}")
.buildAsyncClient();
List<TextDocumentInput> documents = new ArrayList<>();
for (int i = 0; i < 3; i++) {
documents.add(new TextDocumentInput(Integer.toString(i),
"The patient is a 54-year-old gentleman with a history of progressive angina over the past several"
+ " months."));
}
AnalyzeHealthcareEntitiesOptions options = new AnalyzeHealthcareEntitiesOptions()
.setIncludeStatistics(true);
client.beginAnalyzeHealthcareEntities(documents, options)
.flatMap(pollResult -> {
AnalyzeHealthcareEntitiesOperationDetail operationResult = pollResult.getValue();
System.out.printf("Operation created time: %s, expiration time: %s.%n",
operationResult.getCreatedAt(), operationResult.getExpiresAt());
return pollResult.getFinalResult();
})
.subscribe(healthcareTaskResultPagedFlux -> healthcareTaskResultPagedFlux.subscribe(
healthcareEntitiesResultCollection -> {
System.out.printf("Results of Azure Text Analytics \"Analyze Healthcare\" Model, version: %s%n",
healthcareEntitiesResultCollection.getModelVersion());
TextDocumentBatchStatistics batchStatistics = healthcareEntitiesResultCollection.getStatistics();
System.out.printf("Documents statistics: document count = %s, erroneous document count = %s,"
+ " transaction count = %s, valid document count = %s.%n",
batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(),
batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
healthcareEntitiesResultCollection.forEach(healthcareEntitiesResult -> {
System.out.println("Document id = " + healthcareEntitiesResult.getId());
System.out.println("Document entities: ");
AtomicInteger ct = new AtomicInteger();
healthcareEntitiesResult.getEntities().forEach(healthcareEntity -> {
System.out.printf(
"\ti = %d, Text: %s, category: %s, subcategory: %s, confidence score: %f.%n",
ct.getAndIncrement(), healthcareEntity.getText(), healthcareEntity.getCategory(),
healthcareEntity.getSubcategory(), healthcareEntity.getConfidenceScore());
IterableStream<EntityDataSource> dataSources = healthcareEntity.getDataSources();
if (dataSources != null) {
dataSources.forEach(dataSource -> System.out.printf(
"\t\tEntity ID in data source: %s, data source: %s.%n",
dataSource.getEntityId(), dataSource.getName()));
}
Map<HealthcareEntity, HealthcareEntityRelationType> relatedHealthcareEntities =
healthcareEntity.getRelatedEntities();
if (!CoreUtils.isNullOrEmpty(relatedHealthcareEntities)) {
relatedHealthcareEntities.forEach(
(relatedHealthcareEntity, entityRelationType) -> System.out.printf(
"\t\tRelated entity: %s, relation type: %s.%n",
relatedHealthcareEntity.getText(), entityRelationType));
}
});
});
}
));
try {
TimeUnit.MINUTES.sleep(20);
} catch (InterruptedException e) {
e.printStackTrace();
}
} | TimeUnit.MINUTES.sleep(20); | public static void main(String[] args) {
TextAnalyticsAsyncClient client =
new TextAnalyticsClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("{endpoint}")
.buildAsyncClient();
List<TextDocumentInput> documents = new ArrayList<>();
for (int i = 0; i < 3; i++) {
documents.add(new TextDocumentInput(Integer.toString(i),
"The patient is a 54-year-old gentleman with a history of progressive angina over the past several"
+ " months."));
}
AnalyzeHealthcareEntitiesOptions options = new AnalyzeHealthcareEntitiesOptions()
.setIncludeStatistics(true);
client.beginAnalyzeHealthcareEntities(documents, options)
.flatMap(pollResult -> {
AnalyzeHealthcareEntitiesOperationDetail operationResult = pollResult.getValue();
System.out.printf("Operation created time: %s, expiration time: %s.%n",
operationResult.getCreatedAt(), operationResult.getExpiresAt());
return pollResult.getFinalResult();
})
.subscribe(healthcareTaskResultPagedFlux -> healthcareTaskResultPagedFlux.subscribe(
healthcareEntitiesResultCollection -> {
System.out.printf("Results of Azure Text Analytics \"Analyze Healthcare\" Model, version: %s%n",
healthcareEntitiesResultCollection.getModelVersion());
TextDocumentBatchStatistics batchStatistics = healthcareEntitiesResultCollection.getStatistics();
System.out.printf("Documents statistics: document count = %s, erroneous document count = %s,"
+ " transaction count = %s, valid document count = %s.%n",
batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(),
batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
healthcareEntitiesResultCollection.forEach(healthcareEntitiesResult -> {
System.out.println("Document id = " + healthcareEntitiesResult.getId());
System.out.println("Document entities: ");
AtomicInteger ct = new AtomicInteger();
healthcareEntitiesResult.getEntities().forEach(healthcareEntity -> {
System.out.printf(
"\ti = %d, Text: %s, category: %s, subcategory: %s, confidence score: %f.%n",
ct.getAndIncrement(), healthcareEntity.getText(), healthcareEntity.getCategory(),
healthcareEntity.getSubcategory(), healthcareEntity.getConfidenceScore());
IterableStream<EntityDataSource> dataSources = healthcareEntity.getDataSources();
if (dataSources != null) {
dataSources.forEach(dataSource -> System.out.printf(
"\t\tEntity ID in data source: %s, data source: %s.%n",
dataSource.getEntityId(), dataSource.getName()));
}
Map<HealthcareEntity, HealthcareEntityRelationType> relatedHealthcareEntities =
healthcareEntity.getRelatedEntities();
if (!CoreUtils.isNullOrEmpty(relatedHealthcareEntities)) {
relatedHealthcareEntities.forEach(
(relatedHealthcareEntity, entityRelationType) -> System.out.printf(
"\t\tRelated entity: %s, relation type: %s.%n",
relatedHealthcareEntity.getText(), entityRelationType));
}
});
});
}
));
try {
TimeUnit.MINUTES.sleep(5);
} catch (InterruptedException e) {
e.printStackTrace();
}
} | class AnalyzeHealthcareEntitiesAsync {
/**
* Main method to invoke this demo about how to begin recognizing the healthcare long-running operation.
*
* @param args Unused arguments to the program.
*/
} | class AnalyzeHealthcareEntitiesAsync {
/**
* Main method to invoke this demo about how to begin recognizing the healthcare long-running operation.
*
* @param args Unused arguments to the program.
*/
} |
`"defaultOidcUser"` appears more than once. Maybe we can define: ``` private static final String DEFAULT_OIDC_USER = "defaultOidcUser"; ```
OidcUser oidcUser = oidcUserService.loadUser(userRequest);
Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
if (authentication != null) {
DefaultOidcUser defaultOidcUser = (DefaultOidcUser) session.getAttribute("defaultOidcUser");
return defaultOidcUser;
}
Set<String> groups = Optional.of(userRequest)
.map(OAuth2UserRequest::getAccessToken)
.map(AbstractOAuth2Token::getTokenValue)
.map(graphClient::getGroupsFromGraph)
.orElseGet(Collections::emptySet);
Set<String> groupRoles = groups.stream()
.filter(properties::isAllowedGroup)
.map(group -> ROLE_PREFIX + group)
.collect(Collectors.toSet());
Set<SimpleGrantedAuthority> authorities = groupRoles.stream()
.map(SimpleGrantedAuthority::new)
.collect(Collectors.toSet());
if (authorities.isEmpty()) {
authorities = DEFAULT_AUTHORITY_SET;
}
String nameAttributeKey =
Optional.of(userRequest)
.map(OAuth2UserRequest::getClientRegistration)
.map(ClientRegistration::getProviderDetails)
.map(ClientRegistration.ProviderDetails::getUserInfoEndpoint)
.map(ClientRegistration.ProviderDetails.UserInfoEndpoint::getUserNameAttributeName)
.filter(StringUtils::hasText)
.orElse(AADTokenClaim.NAME);
DefaultOidcUser defaultOidcUser = new DefaultOidcUser(authorities, oidcUser.getIdToken(), nameAttributeKey);
session.setAttribute("defaultOidcUser", defaultOidcUser);
return defaultOidcUser;
} | DefaultOidcUser defaultOidcUser = (DefaultOidcUser) session.getAttribute("defaultOidcUser"); | public OidcUser loadUser(OidcUserRequest userRequest) throws OAuth2AuthenticationException {
OidcUser oidcUser = oidcUserService.loadUser(userRequest);
Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
ServletRequestAttributes attr = (ServletRequestAttributes) RequestContextHolder.currentRequestAttributes();
HttpSession session = attr.getRequest().getSession(true);
if (authentication != null) {
return (DefaultOidcUser) session.getAttribute(DEFAULT_OIDC_USER);
}
Set<String> groups = Optional.of(userRequest)
.map(OAuth2UserRequest::getAccessToken)
.map(AbstractOAuth2Token::getTokenValue)
.map(graphClient::getGroupsFromGraph)
.orElseGet(Collections::emptySet);
Set<String> groupRoles = groups.stream()
.filter(properties::isAllowedGroup)
.map(group -> ROLE_PREFIX + group)
.collect(Collectors.toSet());
Set<SimpleGrantedAuthority> authorities = groupRoles.stream()
.map(SimpleGrantedAuthority::new)
.collect(Collectors.toSet());
if (authorities.isEmpty()) {
authorities = DEFAULT_AUTHORITY_SET;
}
String nameAttributeKey =
Optional.of(userRequest)
.map(OAuth2UserRequest::getClientRegistration)
.map(ClientRegistration::getProviderDetails)
.map(ClientRegistration.ProviderDetails::getUserInfoEndpoint)
.map(ClientRegistration.ProviderDetails.UserInfoEndpoint::getUserNameAttributeName)
.filter(StringUtils::hasText)
.orElse(AADTokenClaim.NAME);
DefaultOidcUser defaultOidcUser = new DefaultOidcUser(authorities, oidcUser.getIdToken(), nameAttributeKey);
session.setAttribute(DEFAULT_OIDC_USER, defaultOidcUser);
return defaultOidcUser;
} | class AzureActiveDirectoryOAuth2UserService implements OAuth2UserService<OidcUserRequest, OidcUser> {
private final OidcUserService oidcUserService;
private final AADAuthenticationProperties properties;
private final GraphClient graphClient;
@Autowired
private HttpSession session;
public AzureActiveDirectoryOAuth2UserService(
AADAuthenticationProperties properties
) {
this.properties = properties;
this.oidcUserService = new OidcUserService();
this.graphClient = new GraphClient(properties);
}
@Override
} | class AzureActiveDirectoryOAuth2UserService implements OAuth2UserService<OidcUserRequest, OidcUser> {
private final OidcUserService oidcUserService;
private final AADAuthenticationProperties properties;
private final GraphClient graphClient;
private static final String DEFAULT_OIDC_USER = "defaultOidcUser";
public AzureActiveDirectoryOAuth2UserService(
AADAuthenticationProperties properties
) {
this.properties = properties;
this.oidcUserService = new OidcUserService();
this.graphClient = new GraphClient(properties);
}
@Override
} |
Also, did you test whether `DefaultOidcUser` serializes and deserializes correctly?
OidcUser oidcUser = oidcUserService.loadUser(userRequest);
Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
if (authentication != null) {
DefaultOidcUser defaultOidcUser = (DefaultOidcUser) session.getAttribute("defaultOidcUser");
return defaultOidcUser;
}
Set<String> groups = Optional.of(userRequest)
.map(OAuth2UserRequest::getAccessToken)
.map(AbstractOAuth2Token::getTokenValue)
.map(graphClient::getGroupsFromGraph)
.orElseGet(Collections::emptySet);
Set<String> groupRoles = groups.stream()
.filter(properties::isAllowedGroup)
.map(group -> ROLE_PREFIX + group)
.collect(Collectors.toSet());
Set<SimpleGrantedAuthority> authorities = groupRoles.stream()
.map(SimpleGrantedAuthority::new)
.collect(Collectors.toSet());
if (authorities.isEmpty()) {
authorities = DEFAULT_AUTHORITY_SET;
}
String nameAttributeKey =
Optional.of(userRequest)
.map(OAuth2UserRequest::getClientRegistration)
.map(ClientRegistration::getProviderDetails)
.map(ClientRegistration.ProviderDetails::getUserInfoEndpoint)
.map(ClientRegistration.ProviderDetails.UserInfoEndpoint::getUserNameAttributeName)
.filter(StringUtils::hasText)
.orElse(AADTokenClaim.NAME);
DefaultOidcUser defaultOidcUser = new DefaultOidcUser(authorities, oidcUser.getIdToken(), nameAttributeKey);
session.setAttribute("defaultOidcUser", defaultOidcUser);
return defaultOidcUser;
} | DefaultOidcUser defaultOidcUser = (DefaultOidcUser) session.getAttribute("defaultOidcUser"); | public OidcUser loadUser(OidcUserRequest userRequest) throws OAuth2AuthenticationException {
OidcUser oidcUser = oidcUserService.loadUser(userRequest);
Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
ServletRequestAttributes attr = (ServletRequestAttributes) RequestContextHolder.currentRequestAttributes();
HttpSession session = attr.getRequest().getSession(true);
if (authentication != null) {
return (DefaultOidcUser) session.getAttribute(DEFAULT_OIDC_USER);
}
Set<String> groups = Optional.of(userRequest)
.map(OAuth2UserRequest::getAccessToken)
.map(AbstractOAuth2Token::getTokenValue)
.map(graphClient::getGroupsFromGraph)
.orElseGet(Collections::emptySet);
Set<String> groupRoles = groups.stream()
.filter(properties::isAllowedGroup)
.map(group -> ROLE_PREFIX + group)
.collect(Collectors.toSet());
Set<SimpleGrantedAuthority> authorities = groupRoles.stream()
.map(SimpleGrantedAuthority::new)
.collect(Collectors.toSet());
if (authorities.isEmpty()) {
authorities = DEFAULT_AUTHORITY_SET;
}
String nameAttributeKey =
Optional.of(userRequest)
.map(OAuth2UserRequest::getClientRegistration)
.map(ClientRegistration::getProviderDetails)
.map(ClientRegistration.ProviderDetails::getUserInfoEndpoint)
.map(ClientRegistration.ProviderDetails.UserInfoEndpoint::getUserNameAttributeName)
.filter(StringUtils::hasText)
.orElse(AADTokenClaim.NAME);
DefaultOidcUser defaultOidcUser = new DefaultOidcUser(authorities, oidcUser.getIdToken(), nameAttributeKey);
session.setAttribute(DEFAULT_OIDC_USER, defaultOidcUser);
return defaultOidcUser;
} | class AzureActiveDirectoryOAuth2UserService implements OAuth2UserService<OidcUserRequest, OidcUser> {
private final OidcUserService oidcUserService;
private final AADAuthenticationProperties properties;
private final GraphClient graphClient;
@Autowired
private HttpSession session;
public AzureActiveDirectoryOAuth2UserService(
AADAuthenticationProperties properties
) {
this.properties = properties;
this.oidcUserService = new OidcUserService();
this.graphClient = new GraphClient(properties);
}
@Override
} | class AzureActiveDirectoryOAuth2UserService implements OAuth2UserService<OidcUserRequest, OidcUser> {
private final OidcUserService oidcUserService;
private final AADAuthenticationProperties properties;
private final GraphClient graphClient;
private static final String DEFAULT_OIDC_USER = "defaultOidcUser";
public AzureActiveDirectoryOAuth2UserService(
AADAuthenticationProperties properties
) {
this.properties = properties;
this.oidcUserService = new OidcUserService();
this.graphClient = new GraphClient(properties);
}
@Override
} |
Maybe we should move ` && !authorizationProperties.get(id).getOnDemand()` to `public boolean isAuthzClient(String id)`. And enrich `AzureActiveDirectoryConfigurationTest`. | public boolean isAuthzClient(String id) {
ClientRegistration client = findByRegistrationId(id);
return client != null && isAuthzClient(client) && !authorizationProperties.get(id).getOnDemand();
} | return client != null && isAuthzClient(client) && !authorizationProperties.get(id).getOnDemand(); | public boolean isAuthzClient(String id) {
ClientRegistration client = findByRegistrationId(id);
return client != null && isAuthzClient(client);
} | class AzureClientRegistrationRepository implements ClientRegistrationRepository, Iterable<ClientRegistration> {
private final AzureClientRegistration azureClient;
private final List<ClientRegistration> otherClients;
private final Map<String, ClientRegistration> allClients;
private Map<String, AuthorizationProperties> authorizationProperties;
public AzureClientRegistrationRepository(AzureClientRegistration azureClient,
List<ClientRegistration> otherClients,
Map<String, AuthorizationProperties> authorizationProperties) {
this.azureClient = azureClient;
this.otherClients = new ArrayList<>(otherClients);
this.authorizationProperties = authorizationProperties;
allClients = new HashMap<>();
addClientRegistration(azureClient.getClient());
for (ClientRegistration c : otherClients) {
addClientRegistration(c);
}
}
private void addClientRegistration(ClientRegistration client) {
allClients.put(client.getRegistrationId(), client);
}
@Override
public ClientRegistration findByRegistrationId(String registrationId) {
return allClients.get(registrationId);
}
@NotNull
@Override
public Iterator<ClientRegistration> iterator() {
return Collections.singleton(azureClient.getClient()).iterator();
}
public AzureClientRegistration getAzureClient() {
return azureClient;
}
public boolean isAuthzClient(ClientRegistration client) {
return otherClients.contains(client);
}
public Map<String, AuthorizationProperties> getAuthorizationProperties() {
return authorizationProperties;
}
public void setAuthorizationProperties(Map<String, AuthorizationProperties> authorizationProperties) {
this.authorizationProperties = authorizationProperties;
}
} | class AzureClientRegistrationRepository implements ClientRegistrationRepository, Iterable<ClientRegistration> {
private final AzureClientRegistration azureClient;
private final List<ClientRegistration> otherClients;
private final Map<String, ClientRegistration> allClients;
private AADAuthenticationProperties properties;
public AzureClientRegistrationRepository(AzureClientRegistration azureClient,
List<ClientRegistration> otherClients,
AADAuthenticationProperties properties) {
this.azureClient = azureClient;
this.otherClients = new ArrayList<>(otherClients);
this.properties = properties;
allClients = new HashMap<>();
addClientRegistration(azureClient.getClient());
for (ClientRegistration c : otherClients) {
addClientRegistration(c);
}
}
private void addClientRegistration(ClientRegistration client) {
allClients.put(client.getRegistrationId(), client);
}
@Override
public ClientRegistration findByRegistrationId(String registrationId) {
return allClients.get(registrationId);
}
@NotNull
@Override
public Iterator<ClientRegistration> iterator() {
return Collections.singleton(azureClient.getClient()).iterator();
}
public AzureClientRegistration getAzureClient() {
return azureClient;
}
public boolean isAuthzClient(ClientRegistration client) {
return otherClients.contains(client)
&& properties.getAuthorization().get(client.getClientName()) != null
&& !properties.getAuthorization().get(client.getClientName()).isOnDemand();
}
} |
`https://graph.microsoft.com/User.Read` belongs to the `graph` client, so it should not exist in `DefaultScopes` — is that right?
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.graph.on-demand = true",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
AzureClientRegistrationRepository repo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = repo.findByRegistrationId("azure");
ClientRegistration graph = repo.findByRegistrationId("graph");
ClientRegistration arm = repo.findByRegistrationId("arm");
assertNotNull(azure);
assertDefaultScopes(
azure,
"openid",
"profile",
"https:
"offline_access",
"https:
assertFalse(repo.isAuthzClient(graph));
assertTrue(repo.isAuthzClient(arm));
assertFalse(repo.isAuthzClient("graph"));
assertTrue(repo.isAuthzClient("arm"));
} | "https: | public void clientRequiresOnDemandPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.graph.on-demand = true",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
AzureClientRegistrationRepository repo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = repo.findByRegistrationId("azure");
ClientRegistration graph = repo.findByRegistrationId("graph");
ClientRegistration arm = repo.findByRegistrationId("arm");
assertNotNull(azure);
assertDefaultScopes(
azure,
"openid",
"profile",
"https:
"offline_access",
"https:
assertFalse(repo.isAuthzClient(graph));
assertTrue(repo.isAuthzClient(arm));
assertFalse(repo.isAuthzClient("graph"));
assertTrue(repo.isAuthzClient("arm"));
} | class AzureActiveDirectoryConfigurationTest {
private AnnotationConfigApplicationContext getContext() {
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(
context,
"azure.activedirectory.client-id = fake-client-id",
"azure.activedirectory.client-secret = fake-client-secret",
"azure.activedirectory.tenant-id = fake-tenant-id",
"azure.activedirectory.user-group.allowed-groups = group1, group2"
);
context.register(AzureActiveDirectoryConfiguration.class);
return context;
}
@Test
public void clientRegistered() {
AnnotationConfigApplicationContext context = getContext();
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertNotNull(azure);
assertEquals("fake-client-id", azure.getClientId());
assertEquals("fake-client-secret", azure.getClientSecret());
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints();
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate());
assertDefaultScopes(azure, "openid", "profile", "https:
}
@Test
public void clientRequiresPermissionRegistered() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertNotNull(azure);
assertNotNull(graph);
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresMultiPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
azure,
"openid",
"profile",
"offline_access",
"Calendars.Read",
"https:
"https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresPermissionInDefaultClient() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
}
@Test
public void aadAwareClientRepository() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
assertEquals(clientRepo.getAzureClient().getClient(), azure);
assertFalse(clientRepo.isAuthzClient(azure));
assertTrue(clientRepo.isAuthzClient(graph));
assertFalse(clientRepo.isAuthzClient("azure"));
assertTrue(clientRepo.isAuthzClient("graph"));
List<ClientRegistration> clients = collectClients(clientRepo);
assertEquals(1, clients.size());
assertEquals("azure", clients.get(0).getRegistrationId());
}
@Test
public void defaultClientWithAuthzScope() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.azure.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
}
@Test
public void customizeUri() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization-server-uri = http:
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints("http:
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
}
@Test
private void assertDefaultScopes(ClientRegistration client, String... scopes) {
assertEquals(scopes.length, client.getScopes().size());
for (String s : scopes) {
assertTrue(client.getScopes().contains(s));
}
}
private void assertDefaultScopes(AzureClientRegistration client, String... expected) {
assertEquals(expected.length, client.getAccessTokenScopes().size());
for (String e : expected) {
assertTrue(client.getAccessTokenScopes().contains(e));
}
}
private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) {
List<ClientRegistration> result = new ArrayList<>();
itr.forEach(result::add);
return result;
}
} | class AzureActiveDirectoryConfigurationTest {
private AnnotationConfigApplicationContext getContext() {
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(
context,
"azure.activedirectory.client-id = fake-client-id",
"azure.activedirectory.client-secret = fake-client-secret",
"azure.activedirectory.tenant-id = fake-tenant-id",
"azure.activedirectory.user-group.allowed-groups = group1, group2"
);
context.register(AzureActiveDirectoryConfiguration.class);
return context;
}
@Test
public void clientRegistered() {
AnnotationConfigApplicationContext context = getContext();
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertNotNull(azure);
assertEquals("fake-client-id", azure.getClientId());
assertEquals("fake-client-secret", azure.getClientSecret());
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints();
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate());
assertDefaultScopes(azure, "openid", "profile", "https:
}
@Test
public void clientRequiresPermissionRegistered() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertNotNull(azure);
assertNotNull(graph);
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresMultiPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
azure,
"openid",
"profile",
"offline_access",
"Calendars.Read",
"https:
"https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresPermissionInDefaultClient() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
}
@Test
public void aadAwareClientRepository() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
assertEquals(clientRepo.getAzureClient().getClient(), azure);
assertFalse(clientRepo.isAuthzClient(azure));
assertTrue(clientRepo.isAuthzClient(graph));
assertFalse(clientRepo.isAuthzClient("azure"));
assertTrue(clientRepo.isAuthzClient("graph"));
List<ClientRegistration> clients = collectClients(clientRepo);
assertEquals(1, clients.size());
assertEquals("azure", clients.get(0).getRegistrationId());
}
@Test
public void defaultClientWithAuthzScope() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.azure.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
}
@Test
public void customizeUri() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization-server-uri = http:
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints("http:
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
}
@Test
private void assertDefaultScopes(ClientRegistration client, String... scopes) {
assertEquals(scopes.length, client.getScopes().size());
for (String s : scopes) {
assertTrue(client.getScopes().contains(s));
}
}
private void assertDefaultScopes(AzureClientRegistration client, String... expected) {
assertEquals(expected.length, client.getAccessTokenScopes().size());
for (String e : expected) {
assertTrue(client.getAccessTokenScopes().contains(e));
}
}
private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) {
List<ClientRegistration> result = new ArrayList<>();
itr.forEach(result::add);
return result;
}
} |
`https://graph.microsoft.com/User.Read` belong to default client if allowedGroups is not null. https://github.com/Azure/azure-sdk-for-java/blob/d1d7fe0c1dbfa4a6b1c157cbe6c07cdfa59efab2/sdk/spring/azure-spring-boot/src/main/java/com/azure/spring/aad/implementation/AzureActiveDirectoryConfiguration.java#L77-L84 | public void clientRequiresOnDemandPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.graph.on-demand = true",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
AzureClientRegistrationRepository repo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = repo.findByRegistrationId("azure");
ClientRegistration graph = repo.findByRegistrationId("graph");
ClientRegistration arm = repo.findByRegistrationId("arm");
assertNotNull(azure);
assertDefaultScopes(
azure,
"openid",
"profile",
"https:
"offline_access",
"https:
assertFalse(repo.isAuthzClient(graph));
assertTrue(repo.isAuthzClient(arm));
assertFalse(repo.isAuthzClient("graph"));
assertTrue(repo.isAuthzClient("arm"));
} | "https: | public void clientRequiresOnDemandPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.graph.on-demand = true",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
AzureClientRegistrationRepository repo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = repo.findByRegistrationId("azure");
ClientRegistration graph = repo.findByRegistrationId("graph");
ClientRegistration arm = repo.findByRegistrationId("arm");
assertNotNull(azure);
assertDefaultScopes(
azure,
"openid",
"profile",
"https:
"offline_access",
"https:
assertFalse(repo.isAuthzClient(graph));
assertTrue(repo.isAuthzClient(arm));
assertFalse(repo.isAuthzClient("graph"));
assertTrue(repo.isAuthzClient("arm"));
} | class AzureActiveDirectoryConfigurationTest {
private AnnotationConfigApplicationContext getContext() {
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(
context,
"azure.activedirectory.client-id = fake-client-id",
"azure.activedirectory.client-secret = fake-client-secret",
"azure.activedirectory.tenant-id = fake-tenant-id",
"azure.activedirectory.user-group.allowed-groups = group1, group2"
);
context.register(AzureActiveDirectoryConfiguration.class);
return context;
}
@Test
public void clientRegistered() {
AnnotationConfigApplicationContext context = getContext();
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertNotNull(azure);
assertEquals("fake-client-id", azure.getClientId());
assertEquals("fake-client-secret", azure.getClientSecret());
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints();
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate());
assertDefaultScopes(azure, "openid", "profile", "https:
}
@Test
public void clientRequiresPermissionRegistered() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertNotNull(azure);
assertNotNull(graph);
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresMultiPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
azure,
"openid",
"profile",
"offline_access",
"Calendars.Read",
"https:
"https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresPermissionInDefaultClient() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
}
@Test
public void aadAwareClientRepository() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
assertEquals(clientRepo.getAzureClient().getClient(), azure);
assertFalse(clientRepo.isAuthzClient(azure));
assertTrue(clientRepo.isAuthzClient(graph));
assertFalse(clientRepo.isAuthzClient("azure"));
assertTrue(clientRepo.isAuthzClient("graph"));
List<ClientRegistration> clients = collectClients(clientRepo);
assertEquals(1, clients.size());
assertEquals("azure", clients.get(0).getRegistrationId());
}
@Test
public void defaultClientWithAuthzScope() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.azure.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
}
@Test
public void customizeUri() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization-server-uri = http:
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints("http:
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
}
@Test
private void assertDefaultScopes(ClientRegistration client, String... scopes) {
assertEquals(scopes.length, client.getScopes().size());
for (String s : scopes) {
assertTrue(client.getScopes().contains(s));
}
}
private void assertDefaultScopes(AzureClientRegistration client, String... expected) {
assertEquals(expected.length, client.getAccessTokenScopes().size());
for (String e : expected) {
assertTrue(client.getAccessTokenScopes().contains(e));
}
}
private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) {
List<ClientRegistration> result = new ArrayList<>();
itr.forEach(result::add);
return result;
}
} | class AzureActiveDirectoryConfigurationTest {
private AnnotationConfigApplicationContext getContext() {
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(
context,
"azure.activedirectory.client-id = fake-client-id",
"azure.activedirectory.client-secret = fake-client-secret",
"azure.activedirectory.tenant-id = fake-tenant-id",
"azure.activedirectory.user-group.allowed-groups = group1, group2"
);
context.register(AzureActiveDirectoryConfiguration.class);
return context;
}
@Test
public void clientRegistered() {
AnnotationConfigApplicationContext context = getContext();
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertNotNull(azure);
assertEquals("fake-client-id", azure.getClientId());
assertEquals("fake-client-secret", azure.getClientSecret());
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints();
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate());
assertDefaultScopes(azure, "openid", "profile", "https:
}
@Test
public void clientRequiresPermissionRegistered() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertNotNull(azure);
assertNotNull(graph);
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresMultiPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
azure,
"openid",
"profile",
"offline_access",
"Calendars.Read",
"https:
"https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresPermissionInDefaultClient() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
}
@Test
public void aadAwareClientRepository() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
assertEquals(clientRepo.getAzureClient().getClient(), azure);
assertFalse(clientRepo.isAuthzClient(azure));
assertTrue(clientRepo.isAuthzClient(graph));
assertFalse(clientRepo.isAuthzClient("azure"));
assertTrue(clientRepo.isAuthzClient("graph"));
List<ClientRegistration> clients = collectClients(clientRepo);
assertEquals(1, clients.size());
assertEquals("azure", clients.get(0).getRegistrationId());
}
@Test
public void defaultClientWithAuthzScope() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.azure.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
}
@Test
public void customizeUri() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization-server-uri = http:
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints("http:
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
}
@Test
private void assertDefaultScopes(ClientRegistration client, String... scopes) {
assertEquals(scopes.length, client.getScopes().size());
for (String s : scopes) {
assertTrue(client.getScopes().contains(s));
}
}
private void assertDefaultScopes(AzureClientRegistration client, String... expected) {
assertEquals(expected.length, client.getAccessTokenScopes().size());
for (String e : expected) {
assertTrue(client.getAccessTokenScopes().contains(e));
}
}
private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) {
List<ClientRegistration> result = new ArrayList<>();
itr.forEach(result::add);
return result;
}
} |
It make sense. | public void clientRequiresOnDemandPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.graph.on-demand = true",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
AzureClientRegistrationRepository repo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = repo.findByRegistrationId("azure");
ClientRegistration graph = repo.findByRegistrationId("graph");
ClientRegistration arm = repo.findByRegistrationId("arm");
assertNotNull(azure);
assertDefaultScopes(
azure,
"openid",
"profile",
"https:
"offline_access",
"https:
assertFalse(repo.isAuthzClient(graph));
assertTrue(repo.isAuthzClient(arm));
assertFalse(repo.isAuthzClient("graph"));
assertTrue(repo.isAuthzClient("arm"));
} | "https: | public void clientRequiresOnDemandPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.graph.on-demand = true",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
AzureClientRegistrationRepository repo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = repo.findByRegistrationId("azure");
ClientRegistration graph = repo.findByRegistrationId("graph");
ClientRegistration arm = repo.findByRegistrationId("arm");
assertNotNull(azure);
assertDefaultScopes(
azure,
"openid",
"profile",
"https:
"offline_access",
"https:
assertFalse(repo.isAuthzClient(graph));
assertTrue(repo.isAuthzClient(arm));
assertFalse(repo.isAuthzClient("graph"));
assertTrue(repo.isAuthzClient("arm"));
} | class AzureActiveDirectoryConfigurationTest {
private AnnotationConfigApplicationContext getContext() {
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(
context,
"azure.activedirectory.client-id = fake-client-id",
"azure.activedirectory.client-secret = fake-client-secret",
"azure.activedirectory.tenant-id = fake-tenant-id",
"azure.activedirectory.user-group.allowed-groups = group1, group2"
);
context.register(AzureActiveDirectoryConfiguration.class);
return context;
}
@Test
public void clientRegistered() {
AnnotationConfigApplicationContext context = getContext();
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertNotNull(azure);
assertEquals("fake-client-id", azure.getClientId());
assertEquals("fake-client-secret", azure.getClientSecret());
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints();
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate());
assertDefaultScopes(azure, "openid", "profile", "https:
}
@Test
public void clientRequiresPermissionRegistered() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertNotNull(azure);
assertNotNull(graph);
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresMultiPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
azure,
"openid",
"profile",
"offline_access",
"Calendars.Read",
"https:
"https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresPermissionInDefaultClient() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
}
@Test
public void aadAwareClientRepository() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
assertEquals(clientRepo.getAzureClient().getClient(), azure);
assertFalse(clientRepo.isAuthzClient(azure));
assertTrue(clientRepo.isAuthzClient(graph));
assertFalse(clientRepo.isAuthzClient("azure"));
assertTrue(clientRepo.isAuthzClient("graph"));
List<ClientRegistration> clients = collectClients(clientRepo);
assertEquals(1, clients.size());
assertEquals("azure", clients.get(0).getRegistrationId());
}
@Test
public void defaultClientWithAuthzScope() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.azure.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
}
@Test
public void customizeUri() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization-server-uri = http:
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints("http:
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
}
@Test
private void assertDefaultScopes(ClientRegistration client, String... scopes) {
assertEquals(scopes.length, client.getScopes().size());
for (String s : scopes) {
assertTrue(client.getScopes().contains(s));
}
}
private void assertDefaultScopes(AzureClientRegistration client, String... expected) {
assertEquals(expected.length, client.getAccessTokenScopes().size());
for (String e : expected) {
assertTrue(client.getAccessTokenScopes().contains(e));
}
}
private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) {
List<ClientRegistration> result = new ArrayList<>();
itr.forEach(result::add);
return result;
}
} | class AzureActiveDirectoryConfigurationTest {
private AnnotationConfigApplicationContext getContext() {
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(
context,
"azure.activedirectory.client-id = fake-client-id",
"azure.activedirectory.client-secret = fake-client-secret",
"azure.activedirectory.tenant-id = fake-tenant-id",
"azure.activedirectory.user-group.allowed-groups = group1, group2"
);
context.register(AzureActiveDirectoryConfiguration.class);
return context;
}
@Test
public void clientRegistered() {
AnnotationConfigApplicationContext context = getContext();
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertNotNull(azure);
assertEquals("fake-client-id", azure.getClientId());
assertEquals("fake-client-secret", azure.getClientSecret());
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints();
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate());
assertDefaultScopes(azure, "openid", "profile", "https:
}
@Test
public void clientRequiresPermissionRegistered() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertNotNull(azure);
assertNotNull(graph);
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresMultiPermissions() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read",
"azure.activedirectory.authorization.arm.scopes = https:
);
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
azure,
"openid",
"profile",
"offline_access",
"Calendars.Read",
"https:
"https:
assertDefaultScopes(graph, "Calendars.Read");
}
@Test
public void clientRequiresPermissionInDefaultClient() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
ClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
assertDefaultScopes(azure,
"openid", "profile", "offline_access", "https:
}
@Test
public void aadAwareClientRepository() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.graph.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
ClientRegistration graph = clientRepo.findByRegistrationId("graph");
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
assertEquals(clientRepo.getAzureClient().getClient(), azure);
assertFalse(clientRepo.isAuthzClient(azure));
assertTrue(clientRepo.isAuthzClient(graph));
assertFalse(clientRepo.isAuthzClient("azure"));
assertTrue(clientRepo.isAuthzClient("graph"));
List<ClientRegistration> clients = collectClients(clientRepo);
assertEquals(1, clients.size());
assertEquals("azure", clients.get(0).getRegistrationId());
}
@Test
public void defaultClientWithAuthzScope() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization.azure.scopes = Calendars.Read");
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
assertDefaultScopes(
clientRepo.getAzureClient(),
"openid", "profile", "offline_access", "https:
);
}
@Test
public void customizeUri() {
AnnotationConfigApplicationContext context = getContext();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(context,
"azure.activedirectory.authorization-server-uri = http:
context.refresh();
AzureClientRegistrationRepository clientRepo = context.getBean(AzureClientRegistrationRepository.class);
ClientRegistration azure = clientRepo.findByRegistrationId("azure");
AuthorizationServerEndpoints endpoints = new AuthorizationServerEndpoints("http:
assertEquals(endpoints.authorizationEndpoint("fake-tenant-id"),
azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint("fake-tenant-id"), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint("fake-tenant-id"), azure.getProviderDetails().getJwkSetUri());
}
@Test
private void assertDefaultScopes(ClientRegistration client, String... scopes) {
assertEquals(scopes.length, client.getScopes().size());
for (String s : scopes) {
assertTrue(client.getScopes().contains(s));
}
}
private void assertDefaultScopes(AzureClientRegistration client, String... expected) {
assertEquals(expected.length, client.getAccessTokenScopes().size());
for (String e : expected) {
assertTrue(client.getAccessTokenScopes().contains(e));
}
}
private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) {
List<ClientRegistration> result = new ArrayList<>();
itr.forEach(result::add);
return result;
}
} |
`CoreUtils.isNullOrEmpty` | public CosmosDBAccountImpl withIpRangeFilter(String ipRangeFilter) {
List<IpAddressOrRange> rules = new ArrayList<>();
for (String ip : ipRangeFilter.split(",")) {
rules.add(new IpAddressOrRange().withIpAddressOrRange(ip));
}
this.innerModel().withIpRules(rules);
return this;
} | } | public CosmosDBAccountImpl withIpRangeFilter(String ipRangeFilter) {
List<IpAddressOrRange> rules = new ArrayList<>();
if (!CoreUtils.isNullOrEmpty(ipRangeFilter)) {
for (String ip : ipRangeFilter.split(",")) {
rules.add(new IpAddressOrRange().withIpAddressOrRange(ip));
}
}
this.innerModel().withIpRules(rules);
return this;
} | class CosmosDBAccountImpl
extends GroupableResourceImpl<CosmosDBAccount, DatabaseAccountGetResultsInner, CosmosDBAccountImpl, CosmosManager>
implements CosmosDBAccount, CosmosDBAccount.Definition, CosmosDBAccount.Update {
private List<FailoverPolicy> failoverPolicies;
private boolean hasFailoverPolicyChanges;
private static final int MAX_DELAY_DUE_TO_MISSING_FAILOVERS = 60 * 10;
private Map<String, VirtualNetworkRule> virtualNetworkRulesMap;
private PrivateEndpointConnectionsImpl privateEndpointConnections;
CosmosDBAccountImpl(String name, DatabaseAccountGetResultsInner innerObject, CosmosManager manager) {
super(fixDBName(name), innerObject, manager);
this.failoverPolicies = new ArrayList<>();
this.privateEndpointConnections =
new PrivateEndpointConnectionsImpl(this.manager().serviceClient().getPrivateEndpointConnections(), this);
}
@Override
public DatabaseAccountKind kind() {
return this.innerModel().kind();
}
@Override
public String documentEndpoint() {
return this.innerModel().documentEndpoint();
}
@Override
public DatabaseAccountOfferType databaseAccountOfferType() {
return this.innerModel().databaseAccountOfferType();
}
@Override
public String ipRangeFilter() {
return this.ipRules().stream().map(IpAddressOrRange::ipAddressOrRange).collect(Collectors.joining(","));
}
@Override
public List<IpAddressOrRange> ipRules() {
return this.innerModel().ipRules();
}
@Override
public ConsistencyPolicy consistencyPolicy() {
return this.innerModel().consistencyPolicy();
}
@Override
public DefaultConsistencyLevel defaultConsistencyLevel() {
if (this.innerModel().consistencyPolicy() == null) {
throw new RuntimeException("Consistency policy is missing!");
}
return this.innerModel().consistencyPolicy().defaultConsistencyLevel();
}
@Override
public List<Location> writableReplications() {
return this.innerModel().writeLocations();
}
@Override
public List<Location> readableReplications() {
return this.innerModel().readLocations();
}
@Override
public DatabaseAccountListKeysResult listKeys() {
return this.listKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListKeysResult> listKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListKeysResultImpl::new);
}
@Override
public DatabaseAccountListReadOnlyKeysResult listReadOnlyKeys() {
return this.listReadOnlyKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListReadOnlyKeysResult> listReadOnlyKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listReadOnlyKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListReadOnlyKeysResultImpl::new);
}
@Override
public DatabaseAccountListConnectionStringsResult listConnectionStrings() {
return this.listConnectionStringsAsync().block();
}
@Override
public Mono<DatabaseAccountListConnectionStringsResult> listConnectionStringsAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listConnectionStringsAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListConnectionStringsResultImpl::new);
}
@Override
public List<SqlDatabase> listSqlDatabases() {
return this.listSqlDatabasesAsync().collectList().block();
}
@Override
public PagedFlux<SqlDatabase> listSqlDatabasesAsync() {
return this
.manager()
.serviceClient()
.getSqlResources()
.listSqlDatabasesAsync(this.resourceGroupName(), this.name())
.mapPage(SqlDatabaseImpl::new);
}
@Override
public List<PrivateLinkResource> listPrivateLinkResources() {
return this.listPrivateLinkResourcesAsync().collectList().block();
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.listByDatabaseAccountAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public PrivateLinkResource getPrivateLinkResource(String groupName) {
return this.getPrivateLinkResourceAsync(groupName).block();
}
@Override
public Mono<PrivateLinkResource> getPrivateLinkResourceAsync(String groupName) {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.getAsync(this.resourceGroupName(), this.name(), groupName)
.map(PrivateLinkResourceImpl::new);
}
@Override
public Map<String, PrivateEndpointConnection> listPrivateEndpointConnection() {
return this.listPrivateEndpointConnectionAsync().block();
}
@Override
public Mono<Map<String, PrivateEndpointConnection>> listPrivateEndpointConnectionAsync() {
return this.privateEndpointConnections.asMapAsync();
}
@Override
public PrivateEndpointConnection getPrivateEndpointConnection(String name) {
return this.getPrivateEndpointConnectionAsync(name).block();
}
@Override
public Mono<PrivateEndpointConnection> getPrivateEndpointConnectionAsync(String name) {
return this
.privateEndpointConnections
.getImplAsync(name)
.map(privateEndpointConnection -> privateEndpointConnection);
}
@Override
public boolean multipleWriteLocationsEnabled() {
return this.innerModel().enableMultipleWriteLocations();
}
@Override
public boolean cassandraConnectorEnabled() {
return this.innerModel().enableCassandraConnector();
}
@Override
public ConnectorOffer cassandraConnectorOffer() {
return this.innerModel().connectorOffer();
}
@Override
public boolean keyBasedMetadataWriteAccessDisabled() {
return this.innerModel().disableKeyBasedMetadataWriteAccess();
}
@Override
public List<Capability> capabilities() {
List<Capability> capabilities = this.innerModel().capabilities();
if (capabilities == null) {
capabilities = new ArrayList<>();
}
return Collections.unmodifiableList(capabilities);
}
@Override
public List<VirtualNetworkRule> virtualNetworkRules() {
List<VirtualNetworkRule> result =
(this.innerModel() != null && this.innerModel().virtualNetworkRules() != null)
? this.innerModel().virtualNetworkRules()
: new ArrayList<VirtualNetworkRule>();
return Collections.unmodifiableList(result);
}
@Override
public void offlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().offlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> offlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.offlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void onlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().onlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> onlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.onlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void regenerateKey(KeyKind keyKind) {
this.manager().serviceClient().getDatabaseAccounts().regenerateKey(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public Mono<Void> regenerateKeyAsync(KeyKind keyKind) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind) {
this.innerModel().withKind(kind);
return this;
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind, Capability... capabilities) {
this.innerModel().withKind(kind);
this.innerModel().withCapabilities(Arrays.asList(capabilities));
return this;
}
@Override
public CosmosDBAccountImpl withDataModelSql() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelMongoDB() {
this.innerModel().withKind(DatabaseAccountKind.MONGO_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelCassandra() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableCassandra"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Cassandra");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelAzureTable() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableTable"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Table");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelGremlin() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableGremlin"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Graph");
return this;
}
@Override
@Override
public CosmosDBAccountImpl withIpRules(List<IpAddressOrRange> ipRules) {
this.innerModel().withIpRules(ipRules);
return this;
}
@Override
protected Mono<DatabaseAccountGetResultsInner> getInnerAsync() {
return this.manager().serviceClient().getDatabaseAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public CosmosDBAccountImpl withWriteReplication(Region region) {
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withReadReplication(Region region) {
this.ensureFailoverIsInitialized();
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
failoverPolicyInner.withFailoverPriority(this.failoverPolicies.size());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withoutReadReplication(Region region) {
this.ensureFailoverIsInitialized();
for (int i = 1; i < this.failoverPolicies.size(); i++) {
if (this.failoverPolicies.get(i).locationName() != null) {
String locName = formatLocationName(this.failoverPolicies.get(i).locationName());
if (locName.equals(region.name())) {
this.failoverPolicies.remove(i);
}
}
}
return this;
}
@Override
public CosmosDBAccountImpl withEventualConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.EVENTUAL, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withSessionConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.SESSION, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withBoundedStalenessConsistency(long maxStalenessPrefix, int maxIntervalInSeconds) {
this.setConsistencyPolicy(DefaultConsistencyLevel.BOUNDED_STALENESS, maxStalenessPrefix, maxIntervalInSeconds);
return this;
}
@Override
public CosmosDBAccountImpl withStrongConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.STRONG, 0, 0);
return this;
}
@Override
public PrivateEndpointConnectionImpl defineNewPrivateEndpointConnection(String name) {
return this.privateEndpointConnections.define(name);
}
@Override
public PrivateEndpointConnectionImpl updatePrivateEndpointConnection(String name) {
return this.privateEndpointConnections.update(name);
}
@Override
public CosmosDBAccountImpl withoutPrivateEndpointConnection(String name) {
this.privateEndpointConnections.remove(name);
return this;
}
CosmosDBAccountImpl withPrivateEndpointConnection(PrivateEndpointConnectionImpl privateEndpointConnection) {
this.privateEndpointConnections.addPrivateEndpointConnection(privateEndpointConnection);
return this;
}
@Override
public Mono<CosmosDBAccount> createResourceAsync() {
return this.doDatabaseUpdateCreate();
}
private DatabaseAccountCreateUpdateParameters createUpdateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountCreateUpdateParameters createUpdateParametersInner = new DatabaseAccountCreateUpdateParameters();
createUpdateParametersInner.withLocation(this.regionName().toLowerCase(Locale.ROOT));
createUpdateParametersInner.withConsistencyPolicy(inner.consistencyPolicy());
createUpdateParametersInner.withIpRules(inner.ipRules());
createUpdateParametersInner.withKind(inner.kind());
createUpdateParametersInner.withCapabilities(inner.capabilities());
createUpdateParametersInner.withTags(inner.tags());
createUpdateParametersInner.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
this
.addLocationsForParameters(
new CreateUpdateLocationParameters(createUpdateParametersInner), this.failoverPolicies);
createUpdateParametersInner.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
createUpdateParametersInner.withEnableCassandraConnector(inner.enableCassandraConnector());
createUpdateParametersInner.withConnectorOffer(inner.connectorOffer());
createUpdateParametersInner.withEnableAutomaticFailover(inner.enableAutomaticFailover());
createUpdateParametersInner.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (this.virtualNetworkRulesMap != null) {
createUpdateParametersInner
.withVirtualNetworkRules(new ArrayList<VirtualNetworkRule>(this.virtualNetworkRulesMap.values()));
this.virtualNetworkRulesMap = null;
}
return createUpdateParametersInner;
}
private DatabaseAccountUpdateParameters updateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountUpdateParameters updateParameters = new DatabaseAccountUpdateParameters();
updateParameters.withTags(inner.tags());
updateParameters.withLocation(this.regionName().toLowerCase(Locale.ROOT));
updateParameters.withConsistencyPolicy(inner.consistencyPolicy());
updateParameters.withIpRules(inner.ipRules());
updateParameters.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
updateParameters.withEnableAutomaticFailover(inner.enableAutomaticFailover());
updateParameters.withCapabilities(inner.capabilities());
updateParameters.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
updateParameters.withEnableCassandraConnector(inner.enableCassandraConnector());
updateParameters.withConnectorOffer(inner.connectorOffer());
updateParameters.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (virtualNetworkRulesMap != null) {
updateParameters.withVirtualNetworkRules(new ArrayList<>(this.virtualNetworkRulesMap.values()));
virtualNetworkRulesMap = null;
}
this.addLocationsForParameters(new UpdateLocationParameters(updateParameters), this.failoverPolicies);
return updateParameters;
}
private static String fixDBName(String name) {
return name.toLowerCase(Locale.ROOT);
}
private void setConsistencyPolicy(
DefaultConsistencyLevel level, long maxStalenessPrefix, int maxIntervalInSeconds) {
ConsistencyPolicy policy = new ConsistencyPolicy();
policy.withDefaultConsistencyLevel(level);
if (level == DefaultConsistencyLevel.BOUNDED_STALENESS) {
policy.withMaxStalenessPrefix(maxStalenessPrefix);
policy.withMaxIntervalInSeconds(maxIntervalInSeconds);
}
this.innerModel().withConsistencyPolicy(policy);
}
private void addLocationsForParameters(HasLocations locationParameters, List<FailoverPolicy> failoverPolicies) {
List<Location> locations = new ArrayList<Location>();
if (failoverPolicies.size() > 0) {
for (int i = 0; i < failoverPolicies.size(); i++) {
FailoverPolicy policyInner = failoverPolicies.get(i);
Location location = new Location();
location.withFailoverPriority(i);
location.withLocationName(policyInner.locationName());
locations.add(location);
}
} else {
Location location = new Location();
location.withFailoverPriority(0);
location.withLocationName(locationParameters.location());
locations.add(location);
}
locationParameters.withLocations(locations);
}
private static String formatLocationName(String locationName) {
return locationName.replace(" ", "").toLowerCase(Locale.ROOT);
}
private Mono<CosmosDBAccount> doDatabaseUpdateCreate() {
final CosmosDBAccountImpl self = this;
final List<Integer> data = new ArrayList<Integer>();
data.add(0);
Mono<DatabaseAccountGetResultsInner> request = null;
HasLocations locationParameters = null;
if (isInCreateMode()) {
final DatabaseAccountCreateUpdateParameters createUpdateParametersInner =
this.createUpdateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.createOrUpdateAsync(resourceGroupName(), name(), createUpdateParametersInner);
locationParameters = new CreateUpdateLocationParameters(createUpdateParametersInner);
} else {
final DatabaseAccountUpdateParameters updateParametersInner = this.updateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.updateAsync(resourceGroupName(), name(), updateParametersInner);
locationParameters = new UpdateLocationParameters(updateParametersInner);
}
Set<String> locations = locationParameters.locations().stream()
.map(location -> formatLocationName(location.locationName()))
.collect(Collectors.toSet());
return request
.flatMap(
databaseAccountInner -> {
self.failoverPolicies.clear();
self.hasFailoverPolicyChanges = false;
return manager()
.databaseAccounts()
.getByResourceGroupAsync(resourceGroupName(), name())
.flatMap(
databaseAccount -> {
if (MAX_DELAY_DUE_TO_MISSING_FAILOVERS > data.get(0)
&& (databaseAccount.id() == null
|| databaseAccount.id().length() == 0
|| locations.size()
!= databaseAccount.innerModel().failoverPolicies().size())) {
return Mono.empty();
}
if (isAFinalProvisioningState(databaseAccount.innerModel().provisioningState())) {
for (Location location : databaseAccount.readableReplications()) {
if (!isAFinalProvisioningState(location.provisioningState())) {
return Mono.empty();
}
if (!locations.contains(formatLocationName(location.locationName()))) {
return Mono.empty();
}
}
} else {
return Mono.empty();
}
self.setInner(databaseAccount.innerModel());
return Mono.just(databaseAccount);
})
.repeatWhenEmpty(
longFlux ->
longFlux
.flatMap(
index -> {
data.set(0, data.get(0) + 30);
return Mono.delay(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(
manager().serviceClient().getDefaultPollInterval()));
}));
});
}
private void ensureFailoverIsInitialized() {
if (this.isInCreateMode()) {
return;
}
if (!this.hasFailoverPolicyChanges) {
this.failoverPolicies.clear();
FailoverPolicy[] policyInners = new FailoverPolicy[this.innerModel().failoverPolicies().size()];
this.innerModel().failoverPolicies().toArray(policyInners);
Arrays
.sort(
policyInners,
Comparator.comparing(FailoverPolicy::failoverPriority));
for (int i = 0; i < policyInners.length; i++) {
this.failoverPolicies.add(policyInners[i]);
}
this.hasFailoverPolicyChanges = true;
}
}
private boolean isAFinalProvisioningState(String state) {
switch (state.toLowerCase(Locale.ROOT)) {
case "succeeded":
case "canceled":
case "failed":
return true;
default:
return false;
}
}
private Map<String, VirtualNetworkRule> ensureVirtualNetworkRules() {
if (this.virtualNetworkRulesMap == null) {
this.virtualNetworkRulesMap = new HashMap<>();
if (this.innerModel() != null && this.innerModel().virtualNetworkRules() != null) {
for (VirtualNetworkRule virtualNetworkRule : this.innerModel().virtualNetworkRules()) {
this.virtualNetworkRulesMap.put(virtualNetworkRule.id(), virtualNetworkRule);
}
}
}
return this.virtualNetworkRulesMap;
}
@Override
public CosmosDBAccountImpl withVirtualNetwork(String virtualNetworkId, String subnetName) {
this.innerModel().withIsVirtualNetworkFilterEnabled(true);
String vnetId = virtualNetworkId + "/subnets/" + subnetName;
ensureVirtualNetworkRules().put(vnetId, new VirtualNetworkRule().withId(vnetId));
return this;
}
@Override
public CosmosDBAccountImpl withoutVirtualNetwork(String virtualNetworkId, String subnetName) {
Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
vnetRules.remove(virtualNetworkId + "/subnets/" + subnetName);
if (vnetRules.size() == 0) {
this.innerModel().withIsVirtualNetworkFilterEnabled(false);
}
return this;
}
@Override
public CosmosDBAccountImpl withVirtualNetworkRules(List<VirtualNetworkRule> virtualNetworkRules) {
Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
if (virtualNetworkRules == null || virtualNetworkRules.isEmpty()) {
vnetRules.clear();
this.innerModel().withIsVirtualNetworkFilterEnabled(false);
return this;
}
this.innerModel().withIsVirtualNetworkFilterEnabled(true);
for (VirtualNetworkRule vnetRule : virtualNetworkRules) {
this.virtualNetworkRulesMap.put(vnetRule.id(), vnetRule);
}
return this;
}
@Override
public CosmosDBAccountImpl withMultipleWriteLocationsEnabled(boolean enabled) {
this.innerModel().withEnableMultipleWriteLocations(enabled);
return this;
}
@Override
public CosmosDBAccountImpl withCassandraConnector(ConnectorOffer connectorOffer) {
this.innerModel().withEnableCassandraConnector(true);
this.innerModel().withConnectorOffer(connectorOffer);
return this;
}
@Override
public CosmosDBAccountImpl withoutCassandraConnector() {
this.innerModel().withEnableCassandraConnector(false);
this.innerModel().withConnectorOffer(null);
return this;
}
@Override
public CosmosDBAccountImpl withDisableKeyBaseMetadataWriteAccess(boolean disabled) {
this.innerModel().withDisableKeyBasedMetadataWriteAccess(disabled);
return this;
}
interface HasLocations {
String location();
List<Location> locations();
void withLocations(List<Location> locations);
}
static class CreateUpdateLocationParameters implements HasLocations {
private DatabaseAccountCreateUpdateParameters parameters;
CreateUpdateLocationParameters(DatabaseAccountCreateUpdateParameters parametersObject) {
parameters = parametersObject;
}
@Override
public String location() {
return parameters.location();
}
@Override
public List<Location> locations() {
return parameters.locations();
}
@Override
public void withLocations(List<Location> locations) {
parameters.withLocations(locations);
}
}
static class UpdateLocationParameters implements HasLocations {
private DatabaseAccountUpdateParameters parameters;
UpdateLocationParameters(DatabaseAccountUpdateParameters parametersObject) {
parameters = parametersObject;
}
@Override
public String location() {
return parameters.location();
}
@Override
public List<Location> locations() {
return parameters.locations();
}
@Override
public void withLocations(List<Location> locations) {
parameters.withLocations(locations);
}
}
} | class CosmosDBAccountImpl
extends GroupableResourceImpl<CosmosDBAccount, DatabaseAccountGetResultsInner, CosmosDBAccountImpl, CosmosManager>
implements CosmosDBAccount, CosmosDBAccount.Definition, CosmosDBAccount.Update {
private List<FailoverPolicy> failoverPolicies;
private boolean hasFailoverPolicyChanges;
private static final int MAX_DELAY_DUE_TO_MISSING_FAILOVERS = 60 * 10;
private Map<String, VirtualNetworkRule> virtualNetworkRulesMap;
private PrivateEndpointConnectionsImpl privateEndpointConnections;
CosmosDBAccountImpl(String name, DatabaseAccountGetResultsInner innerObject, CosmosManager manager) {
super(fixDBName(name), innerObject, manager);
this.failoverPolicies = new ArrayList<>();
this.privateEndpointConnections =
new PrivateEndpointConnectionsImpl(this.manager().serviceClient().getPrivateEndpointConnections(), this);
}
@Override
public DatabaseAccountKind kind() {
return this.innerModel().kind();
}
@Override
public String documentEndpoint() {
return this.innerModel().documentEndpoint();
}
@Override
public DatabaseAccountOfferType databaseAccountOfferType() {
return this.innerModel().databaseAccountOfferType();
}
@Override
public String ipRangeFilter() {
if (CoreUtils.isNullOrEmpty(ipRules())) {
return null;
}
return this.ipRules().stream().map(IpAddressOrRange::ipAddressOrRange).collect(Collectors.joining(","));
}
@Override
public List<IpAddressOrRange> ipRules() {
return Collections.unmodifiableList(this.innerModel().ipRules());
}
@Override
public ConsistencyPolicy consistencyPolicy() {
return this.innerModel().consistencyPolicy();
}
@Override
public DefaultConsistencyLevel defaultConsistencyLevel() {
if (this.innerModel().consistencyPolicy() == null) {
throw new RuntimeException("Consistency policy is missing!");
}
return this.innerModel().consistencyPolicy().defaultConsistencyLevel();
}
@Override
public List<Location> writableReplications() {
return this.innerModel().writeLocations();
}
@Override
public List<Location> readableReplications() {
return this.innerModel().readLocations();
}
@Override
public DatabaseAccountListKeysResult listKeys() {
return this.listKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListKeysResult> listKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListKeysResultImpl::new);
}
@Override
public DatabaseAccountListReadOnlyKeysResult listReadOnlyKeys() {
return this.listReadOnlyKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListReadOnlyKeysResult> listReadOnlyKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listReadOnlyKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListReadOnlyKeysResultImpl::new);
}
@Override
public DatabaseAccountListConnectionStringsResult listConnectionStrings() {
return this.listConnectionStringsAsync().block();
}
@Override
public Mono<DatabaseAccountListConnectionStringsResult> listConnectionStringsAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listConnectionStringsAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListConnectionStringsResultImpl::new);
}
@Override
public List<SqlDatabase> listSqlDatabases() {
return this.listSqlDatabasesAsync().collectList().block();
}
@Override
public PagedFlux<SqlDatabase> listSqlDatabasesAsync() {
return this
.manager()
.serviceClient()
.getSqlResources()
.listSqlDatabasesAsync(this.resourceGroupName(), this.name())
.mapPage(SqlDatabaseImpl::new);
}
@Override
public List<PrivateLinkResource> listPrivateLinkResources() {
return this.listPrivateLinkResourcesAsync().collectList().block();
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.listByDatabaseAccountAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public PrivateLinkResource getPrivateLinkResource(String groupName) {
return this.getPrivateLinkResourceAsync(groupName).block();
}
@Override
public Mono<PrivateLinkResource> getPrivateLinkResourceAsync(String groupName) {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.getAsync(this.resourceGroupName(), this.name(), groupName)
.map(PrivateLinkResourceImpl::new);
}
@Override
public Map<String, PrivateEndpointConnection> listPrivateEndpointConnection() {
return this.listPrivateEndpointConnectionAsync().block();
}
@Override
public Mono<Map<String, PrivateEndpointConnection>> listPrivateEndpointConnectionAsync() {
return this.privateEndpointConnections.asMapAsync();
}
@Override
public PrivateEndpointConnection getPrivateEndpointConnection(String name) {
return this.getPrivateEndpointConnectionAsync(name).block();
}
@Override
public Mono<PrivateEndpointConnection> getPrivateEndpointConnectionAsync(String name) {
return this
.privateEndpointConnections
.getImplAsync(name)
.map(privateEndpointConnection -> privateEndpointConnection);
}
@Override
public boolean multipleWriteLocationsEnabled() {
return this.innerModel().enableMultipleWriteLocations();
}
@Override
public boolean cassandraConnectorEnabled() {
return this.innerModel().enableCassandraConnector();
}
@Override
public ConnectorOffer cassandraConnectorOffer() {
return this.innerModel().connectorOffer();
}
@Override
public boolean keyBasedMetadataWriteAccessDisabled() {
return this.innerModel().disableKeyBasedMetadataWriteAccess();
}
@Override
public List<Capability> capabilities() {
List<Capability> capabilities = this.innerModel().capabilities();
if (capabilities == null) {
capabilities = new ArrayList<>();
}
return Collections.unmodifiableList(capabilities);
}
@Override
public List<VirtualNetworkRule> virtualNetworkRules() {
List<VirtualNetworkRule> result =
(this.innerModel() != null && this.innerModel().virtualNetworkRules() != null)
? this.innerModel().virtualNetworkRules()
: new ArrayList<VirtualNetworkRule>();
return Collections.unmodifiableList(result);
}
@Override
public void offlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().offlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> offlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.offlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void onlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().onlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> onlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.onlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void regenerateKey(KeyKind keyKind) {
this.manager().serviceClient().getDatabaseAccounts().regenerateKey(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public Mono<Void> regenerateKeyAsync(KeyKind keyKind) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind) {
this.innerModel().withKind(kind);
return this;
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind, Capability... capabilities) {
this.innerModel().withKind(kind);
this.innerModel().withCapabilities(Arrays.asList(capabilities));
return this;
}
@Override
public CosmosDBAccountImpl withDataModelSql() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelMongoDB() {
this.innerModel().withKind(DatabaseAccountKind.MONGO_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelCassandra() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableCassandra"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Cassandra");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelAzureTable() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableTable"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Table");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelGremlin() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableGremlin"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Graph");
return this;
}
@Override
@Override
public CosmosDBAccountImpl withIpRules(List<IpAddressOrRange> ipRules) {
this.innerModel().withIpRules(ipRules);
return this;
}
@Override
protected Mono<DatabaseAccountGetResultsInner> getInnerAsync() {
return this.manager().serviceClient().getDatabaseAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public CosmosDBAccountImpl withWriteReplication(Region region) {
    // Append the write region; priorities are (re)assigned positionally later
    // in addLocationsForParameters, so none is set here.
    FailoverPolicy writePolicy = new FailoverPolicy().withLocationName(region.name());
    this.hasFailoverPolicyChanges = true;
    this.failoverPolicies.add(writePolicy);
    return this;
}
@Override
public CosmosDBAccountImpl withReadReplication(Region region) {
    // Seed the local policy list from service state before appending.
    this.ensureFailoverIsInitialized();
    FailoverPolicy readPolicy =
        new FailoverPolicy()
            .withLocationName(region.name())
            .withFailoverPriority(this.failoverPolicies.size());
    this.hasFailoverPolicyChanges = true;
    this.failoverPolicies.add(readPolicy);
    return this;
}
@Override
public CosmosDBAccountImpl withoutReadReplication(Region region) {
    this.ensureFailoverIsInitialized();
    // Fix: the original iterated forward and removed by index without
    // compensating, so the element shifted into the removed slot was skipped.
    // Iterating backwards removes every match. Index 0 (the write region)
    // is intentionally never removed.
    for (int i = this.failoverPolicies.size() - 1; i >= 1; i--) {
        String locationName = this.failoverPolicies.get(i).locationName();
        if (locationName != null && formatLocationName(locationName).equals(region.name())) {
            this.failoverPolicies.remove(i);
        }
    }
    return this;
}
@Override
public CosmosDBAccountImpl withEventualConsistency() {
    // Staleness bounds are ignored for non-bounded levels; pass zeros.
    setConsistencyPolicy(DefaultConsistencyLevel.EVENTUAL, 0, 0);
    return this;
}
@Override
public CosmosDBAccountImpl withSessionConsistency() {
    // Staleness bounds are ignored for non-bounded levels; pass zeros.
    setConsistencyPolicy(DefaultConsistencyLevel.SESSION, 0, 0);
    return this;
}
@Override
public CosmosDBAccountImpl withBoundedStalenessConsistency(long maxStalenessPrefix, int maxIntervalInSeconds) {
    // Bounded staleness is the only level that honors the two bounds.
    setConsistencyPolicy(DefaultConsistencyLevel.BOUNDED_STALENESS, maxStalenessPrefix, maxIntervalInSeconds);
    return this;
}
@Override
public CosmosDBAccountImpl withStrongConsistency() {
    // Staleness bounds are ignored for non-bounded levels; pass zeros.
    setConsistencyPolicy(DefaultConsistencyLevel.STRONG, 0, 0);
    return this;
}
@Override
public PrivateEndpointConnectionImpl defineNewPrivateEndpointConnection(String name) {
    // Delegate to the child-resource collection's definition flow.
    return privateEndpointConnections.define(name);
}
@Override
public PrivateEndpointConnectionImpl updatePrivateEndpointConnection(String name) {
    // Delegate to the child-resource collection's update flow.
    return privateEndpointConnections.update(name);
}
@Override
public CosmosDBAccountImpl withoutPrivateEndpointConnection(String name) {
    // Queue the named connection for removal on the next apply.
    privateEndpointConnections.remove(name);
    return this;
}
// Package-private hook used by the child definition to attach itself back
// to this parent account.
CosmosDBAccountImpl withPrivateEndpointConnection(PrivateEndpointConnectionImpl privateEndpointConnection) {
    privateEndpointConnections.addPrivateEndpointConnection(privateEndpointConnection);
    return this;
}
@Override
public Mono<CosmosDBAccount> createResourceAsync() {
    // Create and update share the same request-then-poll workflow.
    return doDatabaseUpdateCreate();
}
/**
 * Builds the create (PUT) payload from the given inner model, carrying over
 * every account-level setting and folding in any pending failover-policy and
 * virtual-network-rule edits held on this impl.
 */
private DatabaseAccountCreateUpdateParameters createUpdateParametersInner(DatabaseAccountGetResultsInner inner) {
// In update mode this seeds failoverPolicies from service state; no-op on create.
this.ensureFailoverIsInitialized();
DatabaseAccountCreateUpdateParameters createUpdateParametersInner = new DatabaseAccountCreateUpdateParameters();
// Service expects a lowercase region name; Locale.ROOT keeps this platform-independent.
createUpdateParametersInner.withLocation(this.regionName().toLowerCase(Locale.ROOT));
createUpdateParametersInner.withConsistencyPolicy(inner.consistencyPolicy());
createUpdateParametersInner.withIpRules(inner.ipRules());
createUpdateParametersInner.withKind(inner.kind());
createUpdateParametersInner.withCapabilities(inner.capabilities());
createUpdateParametersInner.withTags(inner.tags());
createUpdateParametersInner.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
// Materialize the locally tracked failover policies into the payload's locations.
this
.addLocationsForParameters(
new CreateUpdateLocationParameters(createUpdateParametersInner), this.failoverPolicies);
createUpdateParametersInner.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
createUpdateParametersInner.withEnableCassandraConnector(inner.enableCassandraConnector());
createUpdateParametersInner.withConnectorOffer(inner.connectorOffer());
createUpdateParametersInner.withEnableAutomaticFailover(inner.enableAutomaticFailover());
createUpdateParametersInner.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
// Pending VNet-rule edits are flushed into the payload and the buffer reset.
if (this.virtualNetworkRulesMap != null) {
createUpdateParametersInner
.withVirtualNetworkRules(new ArrayList<VirtualNetworkRule>(this.virtualNetworkRulesMap.values()));
this.virtualNetworkRulesMap = null;
}
return createUpdateParametersInner;
}
/**
 * Builds the update (PATCH-style) payload from the given inner model, mirroring
 * {@code createUpdateParametersInner} but for the update parameter type.
 */
private DatabaseAccountUpdateParameters updateParametersInner(DatabaseAccountGetResultsInner inner) {
// Seeds failoverPolicies from service state if the user made no failover edits.
this.ensureFailoverIsInitialized();
DatabaseAccountUpdateParameters updateParameters = new DatabaseAccountUpdateParameters();
updateParameters.withTags(inner.tags());
// Service expects a lowercase region name; Locale.ROOT keeps this platform-independent.
updateParameters.withLocation(this.regionName().toLowerCase(Locale.ROOT));
updateParameters.withConsistencyPolicy(inner.consistencyPolicy());
updateParameters.withIpRules(inner.ipRules());
updateParameters.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
updateParameters.withEnableAutomaticFailover(inner.enableAutomaticFailover());
updateParameters.withCapabilities(inner.capabilities());
updateParameters.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
updateParameters.withEnableCassandraConnector(inner.enableCassandraConnector());
updateParameters.withConnectorOffer(inner.connectorOffer());
updateParameters.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
// Pending VNet-rule edits are flushed into the payload and the buffer reset.
if (virtualNetworkRulesMap != null) {
updateParameters.withVirtualNetworkRules(new ArrayList<>(this.virtualNetworkRulesMap.values()));
virtualNetworkRulesMap = null;
}
// Materialize the locally tracked failover policies into the payload's locations.
this.addLocationsForParameters(new UpdateLocationParameters(updateParameters), this.failoverPolicies);
return updateParameters;
}
// Normalizes an account name to lowercase with a fixed locale so the result
// does not depend on the JVM's default locale.
private static String fixDBName(String name) {
    String normalized = name.toLowerCase(Locale.ROOT);
    return normalized;
}
// Installs a consistency policy on the inner model. The staleness bounds are
// only meaningful (and only applied) for BOUNDED_STALENESS.
private void setConsistencyPolicy(
    DefaultConsistencyLevel level, long maxStalenessPrefix, int maxIntervalInSeconds) {
    ConsistencyPolicy consistencyPolicy = new ConsistencyPolicy().withDefaultConsistencyLevel(level);
    if (DefaultConsistencyLevel.BOUNDED_STALENESS == level) {
        consistencyPolicy.withMaxStalenessPrefix(maxStalenessPrefix);
        consistencyPolicy.withMaxIntervalInSeconds(maxIntervalInSeconds);
    }
    this.innerModel().withConsistencyPolicy(consistencyPolicy);
}
// Converts the tracked failover policies into the payload's location list,
// assigning failover priority by list position (index 0 = write region).
// With no policies, a single location at the payload's own region is used.
private void addLocationsForParameters(HasLocations locationParameters, List<FailoverPolicy> failoverPolicies) {
    List<Location> locations = new ArrayList<>();
    if (failoverPolicies.isEmpty()) {
        locations.add(new Location().withFailoverPriority(0).withLocationName(locationParameters.location()));
    } else {
        int priority = 0;
        for (FailoverPolicy policy : failoverPolicies) {
            locations.add(new Location().withFailoverPriority(priority++).withLocationName(policy.locationName()));
        }
    }
    locationParameters.withLocations(locations);
}
// Canonicalizes a location display name (e.g. "West US" -> "westus") so names
// from different API surfaces compare equal.
private static String formatLocationName(String locationName) {
    return locationName.toLowerCase(Locale.ROOT).replace(" ", "");
}
/**
 * Issues the create-or-update request, then polls the account until it reaches
 * a final provisioning state with all requested replication locations present,
 * finally re-synchronizing this impl's inner model with the service result.
 */
private Mono<CosmosDBAccount> doDatabaseUpdateCreate() {
final CosmosDBAccountImpl self = this;
// Single-element mutable holder for elapsed polling time (seconds), so the
// lambda below can update it; compared against MAX_DELAY_DUE_TO_MISSING_FAILOVERS.
final List<Integer> data = new ArrayList<Integer>();
data.add(0);
Mono<DatabaseAccountGetResultsInner> request = null;
HasLocations locationParameters = null;
if (isInCreateMode()) {
// Create path: full PUT payload.
final DatabaseAccountCreateUpdateParameters createUpdateParametersInner =
this.createUpdateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.createOrUpdateAsync(resourceGroupName(), name(), createUpdateParametersInner);
locationParameters = new CreateUpdateLocationParameters(createUpdateParametersInner);
} else {
// Update path: update payload.
final DatabaseAccountUpdateParameters updateParametersInner = this.updateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.updateAsync(resourceGroupName(), name(), updateParametersInner);
locationParameters = new UpdateLocationParameters(updateParametersInner);
}
// Canonicalized set of location names we expect the service to report back.
Set<String> locations = locationParameters.locations().stream()
.map(location -> formatLocationName(location.locationName()))
.collect(Collectors.toSet());
return request
.flatMap(
databaseAccountInner -> {
// Local failover edits have been sent; reset the tracking state.
self.failoverPolicies.clear();
self.hasFailoverPolicyChanges = false;
return manager()
.databaseAccounts()
.getByResourceGroupAsync(resourceGroupName(), name())
.flatMap(
databaseAccount -> {
// Returning Mono.empty() below triggers another poll via repeatWhenEmpty.
// Within the grace window, keep waiting while the account id is missing
// or the reported failover policies don't yet match what we requested.
if (MAX_DELAY_DUE_TO_MISSING_FAILOVERS > data.get(0)
&& (databaseAccount.id() == null
|| databaseAccount.id().length() == 0
|| locations.size()
!= databaseAccount.innerModel().failoverPolicies().size())) {
return Mono.empty();
}
if (isAFinalProvisioningState(databaseAccount.innerModel().provisioningState())) {
// Every readable replication must itself be final and expected.
for (Location location : databaseAccount.readableReplications()) {
if (!isAFinalProvisioningState(location.provisioningState())) {
return Mono.empty();
}
if (!locations.contains(formatLocationName(location.locationName()))) {
return Mono.empty();
}
}
} else {
return Mono.empty();
}
// Done: adopt the service's view of the account.
self.setInner(databaseAccount.innerModel());
return Mono.just(databaseAccount);
})
.repeatWhenEmpty(
longFlux ->
longFlux
.flatMap(
index -> {
// NOTE(review): counter advances by a fixed 30 per retry, which is
// assumed to approximate the poll interval — confirm against
// getDefaultPollInterval before relying on the grace-window math.
data.set(0, data.get(0) + 30);
return Mono.delay(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(
manager().serviceClient().getDefaultPollInterval()));
}));
});
}
// Lazily seeds the local failover-policy list from the service state when in
// update mode and no user edits have been recorded yet. Policies are ordered
// by their failover priority; the flag prevents a second re-seed.
private void ensureFailoverIsInitialized() {
    if (this.isInCreateMode()) {
        return; // Nothing to seed on create; policies come only from builder calls.
    }
    if (this.hasFailoverPolicyChanges) {
        return; // Already seeded or user-edited.
    }
    this.failoverPolicies.clear();
    List<FailoverPolicy> sortedPolicies = new ArrayList<>(this.innerModel().failoverPolicies());
    sortedPolicies.sort(Comparator.comparing(FailoverPolicy::failoverPriority));
    this.failoverPolicies.addAll(sortedPolicies);
    this.hasFailoverPolicyChanges = true;
}
/**
 * Returns whether the given provisioning state is terminal.
 *
 * <p>Fix: the original dereferenced {@code state} unconditionally and threw
 * NPE when the service had not yet populated a provisioning state; a null
 * state is treated as "not final" so the caller keeps polling instead of
 * failing the pipeline.
 */
private boolean isAFinalProvisioningState(String state) {
    if (state == null) {
        return false;
    }
    switch (state.toLowerCase(Locale.ROOT)) {
        case "succeeded":
        case "canceled":
        case "failed":
            return true;
        default:
            return false;
    }
}
// Lazily builds the id -> rule buffer of virtual-network rules, seeding it
// from the inner model's current rules on first use.
private Map<String, VirtualNetworkRule> ensureVirtualNetworkRules() {
    if (virtualNetworkRulesMap == null) {
        virtualNetworkRulesMap = new HashMap<>();
        DatabaseAccountGetResultsInner inner = this.innerModel();
        if (inner != null && inner.virtualNetworkRules() != null) {
            for (VirtualNetworkRule rule : inner.virtualNetworkRules()) {
                virtualNetworkRulesMap.put(rule.id(), rule);
            }
        }
    }
    return virtualNetworkRulesMap;
}
@Override
public CosmosDBAccountImpl withVirtualNetwork(String virtualNetworkId, String subnetName) {
    // Adding any rule implies the VNet filter must be on.
    this.innerModel().withIsVirtualNetworkFilterEnabled(true);
    String subnetId = virtualNetworkId + "/subnets/" + subnetName;
    ensureVirtualNetworkRules().put(subnetId, new VirtualNetworkRule().withId(subnetId));
    return this;
}
@Override
public CosmosDBAccountImpl withoutVirtualNetwork(String virtualNetworkId, String subnetName) {
    Map<String, VirtualNetworkRule> rules = ensureVirtualNetworkRules();
    rules.remove(virtualNetworkId + "/subnets/" + subnetName);
    // Removing the last rule also switches the VNet filter off.
    if (rules.isEmpty()) {
        this.innerModel().withIsVirtualNetworkFilterEnabled(false);
    }
    return this;
}
@Override
public CosmosDBAccountImpl withVirtualNetworkRules(List<VirtualNetworkRule> virtualNetworkRules) {
    Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
    // null or empty means "drop all rules and turn the VNet filter off".
    if (virtualNetworkRules == null || virtualNetworkRules.isEmpty()) {
        vnetRules.clear();
        this.innerModel().withIsVirtualNetworkFilterEnabled(false);
        return this;
    }
    this.innerModel().withIsVirtualNetworkFilterEnabled(true);
    for (VirtualNetworkRule vnetRule : virtualNetworkRules) {
        // Consistency fix: use the accessor's map handle like the branch above,
        // instead of reaching for the backing field directly.
        vnetRules.put(vnetRule.id(), vnetRule);
    }
    return this;
}
@Override
public CosmosDBAccountImpl withMultipleWriteLocationsEnabled(boolean enabled) {
    // Toggles multi-region writes on the inner model.
    innerModel().withEnableMultipleWriteLocations(enabled);
    return this;
}
@Override
public CosmosDBAccountImpl withCassandraConnector(ConnectorOffer connectorOffer) {
    // Enabling the connector also records the selected offer.
    DatabaseAccountGetResultsInner inner = this.innerModel();
    inner.withEnableCassandraConnector(true);
    inner.withConnectorOffer(connectorOffer);
    return this;
}
@Override
public CosmosDBAccountImpl withoutCassandraConnector() {
    // Disabling the connector also clears the offer.
    DatabaseAccountGetResultsInner inner = this.innerModel();
    inner.withEnableCassandraConnector(false);
    inner.withConnectorOffer(null);
    return this;
}
@Override
public CosmosDBAccountImpl withDisableKeyBaseMetadataWriteAccess(boolean disabled) {
    // Controls whether account keys may perform metadata writes.
    innerModel().withDisableKeyBasedMetadataWriteAccess(disabled);
    return this;
}
/**
 * Internal abstraction over the create and update payload types so location
 * handling can be shared between the two flows.
 */
interface HasLocations {
// Primary region name carried by the payload.
String location();
// Locations currently set on the payload.
List<Location> locations();
// Replaces the payload's location list.
void withLocations(List<Location> locations);
}
/** Adapts {@code DatabaseAccountCreateUpdateParameters} to {@link HasLocations}. */
static class CreateUpdateLocationParameters implements HasLocations {
    // Improvement: the wrapped payload is assigned once in the constructor and
    // never reassigned, so the field is now final (immutability by default).
    private final DatabaseAccountCreateUpdateParameters parameters;

    CreateUpdateLocationParameters(DatabaseAccountCreateUpdateParameters parametersObject) {
        parameters = parametersObject;
    }

    @Override
    public String location() {
        return parameters.location();
    }

    @Override
    public List<Location> locations() {
        return parameters.locations();
    }

    @Override
    public void withLocations(List<Location> locations) {
        parameters.withLocations(locations);
    }
}
/** Adapts {@code DatabaseAccountUpdateParameters} to {@link HasLocations}. */
static class UpdateLocationParameters implements HasLocations {
    // Improvement: the wrapped payload is assigned once in the constructor and
    // never reassigned, so the field is now final (immutability by default).
    private final DatabaseAccountUpdateParameters parameters;

    UpdateLocationParameters(DatabaseAccountUpdateParameters parametersObject) {
        parameters = parametersObject;
    }

    @Override
    public String location() {
        return parameters.location();
    }

    @Override
    public List<Location> locations() {
        return parameters.locations();
    }

    @Override
    public void withLocations(List<Location> locations) {
        parameters.withLocations(locations);
    }
}
} |
done | public CosmosDBAccountImpl withIpRangeFilter(String ipRangeFilter) {
List<IpAddressOrRange> rules = new ArrayList<>();
for (String ip : ipRangeFilter.split(",")) {
rules.add(new IpAddressOrRange().withIpAddressOrRange(ip));
}
this.innerModel().withIpRules(rules);
return this;
} | } | public CosmosDBAccountImpl withIpRangeFilter(String ipRangeFilter) {
List<IpAddressOrRange> rules = new ArrayList<>();
if (!CoreUtils.isNullOrEmpty(ipRangeFilter)) {
for (String ip : ipRangeFilter.split(",")) {
rules.add(new IpAddressOrRange().withIpAddressOrRange(ip));
}
}
this.innerModel().withIpRules(rules);
return this;
} | class CosmosDBAccountImpl
extends GroupableResourceImpl<CosmosDBAccount, DatabaseAccountGetResultsInner, CosmosDBAccountImpl, CosmosManager>
implements CosmosDBAccount, CosmosDBAccount.Definition, CosmosDBAccount.Update {
private List<FailoverPolicy> failoverPolicies;
private boolean hasFailoverPolicyChanges;
private static final int MAX_DELAY_DUE_TO_MISSING_FAILOVERS = 60 * 10;
private Map<String, VirtualNetworkRule> virtualNetworkRulesMap;
private PrivateEndpointConnectionsImpl privateEndpointConnections;
CosmosDBAccountImpl(String name, DatabaseAccountGetResultsInner innerObject, CosmosManager manager) {
super(fixDBName(name), innerObject, manager);
this.failoverPolicies = new ArrayList<>();
this.privateEndpointConnections =
new PrivateEndpointConnectionsImpl(this.manager().serviceClient().getPrivateEndpointConnections(), this);
}
@Override
public DatabaseAccountKind kind() {
return this.innerModel().kind();
}
@Override
public String documentEndpoint() {
return this.innerModel().documentEndpoint();
}
@Override
public DatabaseAccountOfferType databaseAccountOfferType() {
return this.innerModel().databaseAccountOfferType();
}
@Override
public String ipRangeFilter() {
return this.ipRules().stream().map(IpAddressOrRange::ipAddressOrRange).collect(Collectors.joining(","));
}
@Override
public List<IpAddressOrRange> ipRules() {
return this.innerModel().ipRules();
}
@Override
public ConsistencyPolicy consistencyPolicy() {
return this.innerModel().consistencyPolicy();
}
@Override
public DefaultConsistencyLevel defaultConsistencyLevel() {
if (this.innerModel().consistencyPolicy() == null) {
throw new RuntimeException("Consistency policy is missing!");
}
return this.innerModel().consistencyPolicy().defaultConsistencyLevel();
}
@Override
public List<Location> writableReplications() {
return this.innerModel().writeLocations();
}
@Override
public List<Location> readableReplications() {
return this.innerModel().readLocations();
}
@Override
public DatabaseAccountListKeysResult listKeys() {
return this.listKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListKeysResult> listKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListKeysResultImpl::new);
}
@Override
public DatabaseAccountListReadOnlyKeysResult listReadOnlyKeys() {
return this.listReadOnlyKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListReadOnlyKeysResult> listReadOnlyKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listReadOnlyKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListReadOnlyKeysResultImpl::new);
}
@Override
public DatabaseAccountListConnectionStringsResult listConnectionStrings() {
return this.listConnectionStringsAsync().block();
}
@Override
public Mono<DatabaseAccountListConnectionStringsResult> listConnectionStringsAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listConnectionStringsAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListConnectionStringsResultImpl::new);
}
@Override
public List<SqlDatabase> listSqlDatabases() {
return this.listSqlDatabasesAsync().collectList().block();
}
@Override
public PagedFlux<SqlDatabase> listSqlDatabasesAsync() {
return this
.manager()
.serviceClient()
.getSqlResources()
.listSqlDatabasesAsync(this.resourceGroupName(), this.name())
.mapPage(SqlDatabaseImpl::new);
}
@Override
public List<PrivateLinkResource> listPrivateLinkResources() {
return this.listPrivateLinkResourcesAsync().collectList().block();
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.listByDatabaseAccountAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public PrivateLinkResource getPrivateLinkResource(String groupName) {
return this.getPrivateLinkResourceAsync(groupName).block();
}
@Override
public Mono<PrivateLinkResource> getPrivateLinkResourceAsync(String groupName) {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.getAsync(this.resourceGroupName(), this.name(), groupName)
.map(PrivateLinkResourceImpl::new);
}
@Override
public Map<String, PrivateEndpointConnection> listPrivateEndpointConnection() {
return this.listPrivateEndpointConnectionAsync().block();
}
@Override
public Mono<Map<String, PrivateEndpointConnection>> listPrivateEndpointConnectionAsync() {
return this.privateEndpointConnections.asMapAsync();
}
@Override
public PrivateEndpointConnection getPrivateEndpointConnection(String name) {
return this.getPrivateEndpointConnectionAsync(name).block();
}
@Override
public Mono<PrivateEndpointConnection> getPrivateEndpointConnectionAsync(String name) {
return this
.privateEndpointConnections
.getImplAsync(name)
.map(privateEndpointConnection -> privateEndpointConnection);
}
@Override
public boolean multipleWriteLocationsEnabled() {
return this.innerModel().enableMultipleWriteLocations();
}
@Override
public boolean cassandraConnectorEnabled() {
return this.innerModel().enableCassandraConnector();
}
@Override
public ConnectorOffer cassandraConnectorOffer() {
return this.innerModel().connectorOffer();
}
@Override
public boolean keyBasedMetadataWriteAccessDisabled() {
return this.innerModel().disableKeyBasedMetadataWriteAccess();
}
@Override
public List<Capability> capabilities() {
List<Capability> capabilities = this.innerModel().capabilities();
if (capabilities == null) {
capabilities = new ArrayList<>();
}
return Collections.unmodifiableList(capabilities);
}
@Override
public List<VirtualNetworkRule> virtualNetworkRules() {
List<VirtualNetworkRule> result =
(this.innerModel() != null && this.innerModel().virtualNetworkRules() != null)
? this.innerModel().virtualNetworkRules()
: new ArrayList<VirtualNetworkRule>();
return Collections.unmodifiableList(result);
}
@Override
public void offlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().offlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> offlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.offlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void onlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().onlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> onlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.onlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void regenerateKey(KeyKind keyKind) {
this.manager().serviceClient().getDatabaseAccounts().regenerateKey(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public Mono<Void> regenerateKeyAsync(KeyKind keyKind) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind) {
this.innerModel().withKind(kind);
return this;
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind, Capability... capabilities) {
this.innerModel().withKind(kind);
this.innerModel().withCapabilities(Arrays.asList(capabilities));
return this;
}
@Override
public CosmosDBAccountImpl withDataModelSql() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelMongoDB() {
this.innerModel().withKind(DatabaseAccountKind.MONGO_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelCassandra() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableCassandra"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Cassandra");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelAzureTable() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableTable"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Table");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelGremlin() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableGremlin"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Graph");
return this;
}
@Override
@Override
public CosmosDBAccountImpl withIpRules(List<IpAddressOrRange> ipRules) {
this.innerModel().withIpRules(ipRules);
return this;
}
@Override
protected Mono<DatabaseAccountGetResultsInner> getInnerAsync() {
return this.manager().serviceClient().getDatabaseAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public CosmosDBAccountImpl withWriteReplication(Region region) {
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withReadReplication(Region region) {
this.ensureFailoverIsInitialized();
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
failoverPolicyInner.withFailoverPriority(this.failoverPolicies.size());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withoutReadReplication(Region region) {
this.ensureFailoverIsInitialized();
for (int i = 1; i < this.failoverPolicies.size(); i++) {
if (this.failoverPolicies.get(i).locationName() != null) {
String locName = formatLocationName(this.failoverPolicies.get(i).locationName());
if (locName.equals(region.name())) {
this.failoverPolicies.remove(i);
}
}
}
return this;
}
@Override
public CosmosDBAccountImpl withEventualConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.EVENTUAL, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withSessionConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.SESSION, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withBoundedStalenessConsistency(long maxStalenessPrefix, int maxIntervalInSeconds) {
this.setConsistencyPolicy(DefaultConsistencyLevel.BOUNDED_STALENESS, maxStalenessPrefix, maxIntervalInSeconds);
return this;
}
@Override
public CosmosDBAccountImpl withStrongConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.STRONG, 0, 0);
return this;
}
@Override
public PrivateEndpointConnectionImpl defineNewPrivateEndpointConnection(String name) {
return this.privateEndpointConnections.define(name);
}
@Override
public PrivateEndpointConnectionImpl updatePrivateEndpointConnection(String name) {
return this.privateEndpointConnections.update(name);
}
@Override
public CosmosDBAccountImpl withoutPrivateEndpointConnection(String name) {
this.privateEndpointConnections.remove(name);
return this;
}
CosmosDBAccountImpl withPrivateEndpointConnection(PrivateEndpointConnectionImpl privateEndpointConnection) {
this.privateEndpointConnections.addPrivateEndpointConnection(privateEndpointConnection);
return this;
}
@Override
public Mono<CosmosDBAccount> createResourceAsync() {
return this.doDatabaseUpdateCreate();
}
private DatabaseAccountCreateUpdateParameters createUpdateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountCreateUpdateParameters createUpdateParametersInner = new DatabaseAccountCreateUpdateParameters();
createUpdateParametersInner.withLocation(this.regionName().toLowerCase(Locale.ROOT));
createUpdateParametersInner.withConsistencyPolicy(inner.consistencyPolicy());
createUpdateParametersInner.withIpRules(inner.ipRules());
createUpdateParametersInner.withKind(inner.kind());
createUpdateParametersInner.withCapabilities(inner.capabilities());
createUpdateParametersInner.withTags(inner.tags());
createUpdateParametersInner.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
this
.addLocationsForParameters(
new CreateUpdateLocationParameters(createUpdateParametersInner), this.failoverPolicies);
createUpdateParametersInner.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
createUpdateParametersInner.withEnableCassandraConnector(inner.enableCassandraConnector());
createUpdateParametersInner.withConnectorOffer(inner.connectorOffer());
createUpdateParametersInner.withEnableAutomaticFailover(inner.enableAutomaticFailover());
createUpdateParametersInner.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (this.virtualNetworkRulesMap != null) {
createUpdateParametersInner
.withVirtualNetworkRules(new ArrayList<VirtualNetworkRule>(this.virtualNetworkRulesMap.values()));
this.virtualNetworkRulesMap = null;
}
return createUpdateParametersInner;
}
private DatabaseAccountUpdateParameters updateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountUpdateParameters updateParameters = new DatabaseAccountUpdateParameters();
updateParameters.withTags(inner.tags());
updateParameters.withLocation(this.regionName().toLowerCase(Locale.ROOT));
updateParameters.withConsistencyPolicy(inner.consistencyPolicy());
updateParameters.withIpRules(inner.ipRules());
updateParameters.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
updateParameters.withEnableAutomaticFailover(inner.enableAutomaticFailover());
updateParameters.withCapabilities(inner.capabilities());
updateParameters.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
updateParameters.withEnableCassandraConnector(inner.enableCassandraConnector());
updateParameters.withConnectorOffer(inner.connectorOffer());
updateParameters.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (virtualNetworkRulesMap != null) {
updateParameters.withVirtualNetworkRules(new ArrayList<>(this.virtualNetworkRulesMap.values()));
virtualNetworkRulesMap = null;
}
this.addLocationsForParameters(new UpdateLocationParameters(updateParameters), this.failoverPolicies);
return updateParameters;
}
private static String fixDBName(String name) {
return name.toLowerCase(Locale.ROOT);
}
private void setConsistencyPolicy(
DefaultConsistencyLevel level, long maxStalenessPrefix, int maxIntervalInSeconds) {
ConsistencyPolicy policy = new ConsistencyPolicy();
policy.withDefaultConsistencyLevel(level);
if (level == DefaultConsistencyLevel.BOUNDED_STALENESS) {
policy.withMaxStalenessPrefix(maxStalenessPrefix);
policy.withMaxIntervalInSeconds(maxIntervalInSeconds);
}
this.innerModel().withConsistencyPolicy(policy);
}
private void addLocationsForParameters(HasLocations locationParameters, List<FailoverPolicy> failoverPolicies) {
List<Location> locations = new ArrayList<Location>();
if (failoverPolicies.size() > 0) {
for (int i = 0; i < failoverPolicies.size(); i++) {
FailoverPolicy policyInner = failoverPolicies.get(i);
Location location = new Location();
location.withFailoverPriority(i);
location.withLocationName(policyInner.locationName());
locations.add(location);
}
} else {
Location location = new Location();
location.withFailoverPriority(0);
location.withLocationName(locationParameters.location());
locations.add(location);
}
locationParameters.withLocations(locations);
}
private static String formatLocationName(String locationName) {
return locationName.replace(" ", "").toLowerCase(Locale.ROOT);
}
private Mono<CosmosDBAccount> doDatabaseUpdateCreate() {
final CosmosDBAccountImpl self = this;
final List<Integer> data = new ArrayList<Integer>();
data.add(0);
Mono<DatabaseAccountGetResultsInner> request = null;
HasLocations locationParameters = null;
if (isInCreateMode()) {
final DatabaseAccountCreateUpdateParameters createUpdateParametersInner =
this.createUpdateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.createOrUpdateAsync(resourceGroupName(), name(), createUpdateParametersInner);
locationParameters = new CreateUpdateLocationParameters(createUpdateParametersInner);
} else {
final DatabaseAccountUpdateParameters updateParametersInner = this.updateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.updateAsync(resourceGroupName(), name(), updateParametersInner);
locationParameters = new UpdateLocationParameters(updateParametersInner);
}
Set<String> locations = locationParameters.locations().stream()
.map(location -> formatLocationName(location.locationName()))
.collect(Collectors.toSet());
return request
.flatMap(
databaseAccountInner -> {
self.failoverPolicies.clear();
self.hasFailoverPolicyChanges = false;
return manager()
.databaseAccounts()
.getByResourceGroupAsync(resourceGroupName(), name())
.flatMap(
databaseAccount -> {
if (MAX_DELAY_DUE_TO_MISSING_FAILOVERS > data.get(0)
&& (databaseAccount.id() == null
|| databaseAccount.id().length() == 0
|| locations.size()
!= databaseAccount.innerModel().failoverPolicies().size())) {
return Mono.empty();
}
if (isAFinalProvisioningState(databaseAccount.innerModel().provisioningState())) {
for (Location location : databaseAccount.readableReplications()) {
if (!isAFinalProvisioningState(location.provisioningState())) {
return Mono.empty();
}
if (!locations.contains(formatLocationName(location.locationName()))) {
return Mono.empty();
}
}
} else {
return Mono.empty();
}
self.setInner(databaseAccount.innerModel());
return Mono.just(databaseAccount);
})
.repeatWhenEmpty(
longFlux ->
longFlux
.flatMap(
index -> {
data.set(0, data.get(0) + 30);
return Mono.delay(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(
manager().serviceClient().getDefaultPollInterval()));
}));
});
}
private void ensureFailoverIsInitialized() {
if (this.isInCreateMode()) {
return;
}
if (!this.hasFailoverPolicyChanges) {
this.failoverPolicies.clear();
FailoverPolicy[] policyInners = new FailoverPolicy[this.innerModel().failoverPolicies().size()];
this.innerModel().failoverPolicies().toArray(policyInners);
Arrays
.sort(
policyInners,
Comparator.comparing(FailoverPolicy::failoverPriority));
for (int i = 0; i < policyInners.length; i++) {
this.failoverPolicies.add(policyInners[i]);
}
this.hasFailoverPolicyChanges = true;
}
}
private boolean isAFinalProvisioningState(String state) {
switch (state.toLowerCase(Locale.ROOT)) {
case "succeeded":
case "canceled":
case "failed":
return true;
default:
return false;
}
}
private Map<String, VirtualNetworkRule> ensureVirtualNetworkRules() {
if (this.virtualNetworkRulesMap == null) {
this.virtualNetworkRulesMap = new HashMap<>();
if (this.innerModel() != null && this.innerModel().virtualNetworkRules() != null) {
for (VirtualNetworkRule virtualNetworkRule : this.innerModel().virtualNetworkRules()) {
this.virtualNetworkRulesMap.put(virtualNetworkRule.id(), virtualNetworkRule);
}
}
}
return this.virtualNetworkRulesMap;
}
@Override
public CosmosDBAccountImpl withVirtualNetwork(String virtualNetworkId, String subnetName) {
this.innerModel().withIsVirtualNetworkFilterEnabled(true);
String vnetId = virtualNetworkId + "/subnets/" + subnetName;
ensureVirtualNetworkRules().put(vnetId, new VirtualNetworkRule().withId(vnetId));
return this;
}
@Override
public CosmosDBAccountImpl withoutVirtualNetwork(String virtualNetworkId, String subnetName) {
Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
vnetRules.remove(virtualNetworkId + "/subnets/" + subnetName);
if (vnetRules.size() == 0) {
this.innerModel().withIsVirtualNetworkFilterEnabled(false);
}
return this;
}
@Override
public CosmosDBAccountImpl withVirtualNetworkRules(List<VirtualNetworkRule> virtualNetworkRules) {
Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
if (virtualNetworkRules == null || virtualNetworkRules.isEmpty()) {
vnetRules.clear();
this.innerModel().withIsVirtualNetworkFilterEnabled(false);
return this;
}
this.innerModel().withIsVirtualNetworkFilterEnabled(true);
for (VirtualNetworkRule vnetRule : virtualNetworkRules) {
this.virtualNetworkRulesMap.put(vnetRule.id(), vnetRule);
}
return this;
}
@Override
public CosmosDBAccountImpl withMultipleWriteLocationsEnabled(boolean enabled) {
this.innerModel().withEnableMultipleWriteLocations(enabled);
return this;
}
@Override
public CosmosDBAccountImpl withCassandraConnector(ConnectorOffer connectorOffer) {
this.innerModel().withEnableCassandraConnector(true);
this.innerModel().withConnectorOffer(connectorOffer);
return this;
}
@Override
public CosmosDBAccountImpl withoutCassandraConnector() {
this.innerModel().withEnableCassandraConnector(false);
this.innerModel().withConnectorOffer(null);
return this;
}
@Override
public CosmosDBAccountImpl withDisableKeyBaseMetadataWriteAccess(boolean disabled) {
this.innerModel().withDisableKeyBasedMetadataWriteAccess(disabled);
return this;
}
interface HasLocations {
String location();
List<Location> locations();
void withLocations(List<Location> locations);
}
static class CreateUpdateLocationParameters implements HasLocations {
private DatabaseAccountCreateUpdateParameters parameters;
CreateUpdateLocationParameters(DatabaseAccountCreateUpdateParameters parametersObject) {
parameters = parametersObject;
}
@Override
public String location() {
return parameters.location();
}
@Override
public List<Location> locations() {
return parameters.locations();
}
@Override
public void withLocations(List<Location> locations) {
parameters.withLocations(locations);
}
}
static class UpdateLocationParameters implements HasLocations {
private DatabaseAccountUpdateParameters parameters;
UpdateLocationParameters(DatabaseAccountUpdateParameters parametersObject) {
parameters = parametersObject;
}
@Override
public String location() {
return parameters.location();
}
@Override
public List<Location> locations() {
return parameters.locations();
}
@Override
public void withLocations(List<Location> locations) {
parameters.withLocations(locations);
}
}
} | class CosmosDBAccountImpl
extends GroupableResourceImpl<CosmosDBAccount, DatabaseAccountGetResultsInner, CosmosDBAccountImpl, CosmosManager>
implements CosmosDBAccount, CosmosDBAccount.Definition, CosmosDBAccount.Update {
private List<FailoverPolicy> failoverPolicies;
private boolean hasFailoverPolicyChanges;
private static final int MAX_DELAY_DUE_TO_MISSING_FAILOVERS = 60 * 10;
private Map<String, VirtualNetworkRule> virtualNetworkRulesMap;
private PrivateEndpointConnectionsImpl privateEndpointConnections;
CosmosDBAccountImpl(String name, DatabaseAccountGetResultsInner innerObject, CosmosManager manager) {
super(fixDBName(name), innerObject, manager);
this.failoverPolicies = new ArrayList<>();
this.privateEndpointConnections =
new PrivateEndpointConnectionsImpl(this.manager().serviceClient().getPrivateEndpointConnections(), this);
}
@Override
public DatabaseAccountKind kind() {
return this.innerModel().kind();
}
@Override
public String documentEndpoint() {
return this.innerModel().documentEndpoint();
}
@Override
public DatabaseAccountOfferType databaseAccountOfferType() {
return this.innerModel().databaseAccountOfferType();
}
@Override
public String ipRangeFilter() {
if (CoreUtils.isNullOrEmpty(ipRules())) {
return null;
}
return this.ipRules().stream().map(IpAddressOrRange::ipAddressOrRange).collect(Collectors.joining(","));
}
@Override
public List<IpAddressOrRange> ipRules() {
return Collections.unmodifiableList(this.innerModel().ipRules());
}
@Override
public ConsistencyPolicy consistencyPolicy() {
return this.innerModel().consistencyPolicy();
}
@Override
public DefaultConsistencyLevel defaultConsistencyLevel() {
if (this.innerModel().consistencyPolicy() == null) {
throw new RuntimeException("Consistency policy is missing!");
}
return this.innerModel().consistencyPolicy().defaultConsistencyLevel();
}
@Override
public List<Location> writableReplications() {
return this.innerModel().writeLocations();
}
@Override
public List<Location> readableReplications() {
return this.innerModel().readLocations();
}
@Override
public DatabaseAccountListKeysResult listKeys() {
return this.listKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListKeysResult> listKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListKeysResultImpl::new);
}
@Override
public DatabaseAccountListReadOnlyKeysResult listReadOnlyKeys() {
return this.listReadOnlyKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListReadOnlyKeysResult> listReadOnlyKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listReadOnlyKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListReadOnlyKeysResultImpl::new);
}
@Override
public DatabaseAccountListConnectionStringsResult listConnectionStrings() {
return this.listConnectionStringsAsync().block();
}
@Override
public Mono<DatabaseAccountListConnectionStringsResult> listConnectionStringsAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listConnectionStringsAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListConnectionStringsResultImpl::new);
}
@Override
public List<SqlDatabase> listSqlDatabases() {
return this.listSqlDatabasesAsync().collectList().block();
}
@Override
public PagedFlux<SqlDatabase> listSqlDatabasesAsync() {
return this
.manager()
.serviceClient()
.getSqlResources()
.listSqlDatabasesAsync(this.resourceGroupName(), this.name())
.mapPage(SqlDatabaseImpl::new);
}
@Override
public List<PrivateLinkResource> listPrivateLinkResources() {
return this.listPrivateLinkResourcesAsync().collectList().block();
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.listByDatabaseAccountAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public PrivateLinkResource getPrivateLinkResource(String groupName) {
return this.getPrivateLinkResourceAsync(groupName).block();
}
@Override
public Mono<PrivateLinkResource> getPrivateLinkResourceAsync(String groupName) {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.getAsync(this.resourceGroupName(), this.name(), groupName)
.map(PrivateLinkResourceImpl::new);
}
@Override
public Map<String, PrivateEndpointConnection> listPrivateEndpointConnection() {
return this.listPrivateEndpointConnectionAsync().block();
}
@Override
public Mono<Map<String, PrivateEndpointConnection>> listPrivateEndpointConnectionAsync() {
return this.privateEndpointConnections.asMapAsync();
}
@Override
public PrivateEndpointConnection getPrivateEndpointConnection(String name) {
return this.getPrivateEndpointConnectionAsync(name).block();
}
@Override
public Mono<PrivateEndpointConnection> getPrivateEndpointConnectionAsync(String name) {
return this
.privateEndpointConnections
.getImplAsync(name)
.map(privateEndpointConnection -> privateEndpointConnection);
}
@Override
public boolean multipleWriteLocationsEnabled() {
return this.innerModel().enableMultipleWriteLocations();
}
@Override
public boolean cassandraConnectorEnabled() {
return this.innerModel().enableCassandraConnector();
}
@Override
public ConnectorOffer cassandraConnectorOffer() {
return this.innerModel().connectorOffer();
}
@Override
public boolean keyBasedMetadataWriteAccessDisabled() {
return this.innerModel().disableKeyBasedMetadataWriteAccess();
}
@Override
public List<Capability> capabilities() {
List<Capability> capabilities = this.innerModel().capabilities();
if (capabilities == null) {
capabilities = new ArrayList<>();
}
return Collections.unmodifiableList(capabilities);
}
@Override
public List<VirtualNetworkRule> virtualNetworkRules() {
List<VirtualNetworkRule> result =
(this.innerModel() != null && this.innerModel().virtualNetworkRules() != null)
? this.innerModel().virtualNetworkRules()
: new ArrayList<VirtualNetworkRule>();
return Collections.unmodifiableList(result);
}
@Override
public void offlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().offlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> offlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.offlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void onlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().onlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> onlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.onlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void regenerateKey(KeyKind keyKind) {
this.manager().serviceClient().getDatabaseAccounts().regenerateKey(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public Mono<Void> regenerateKeyAsync(KeyKind keyKind) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind) {
this.innerModel().withKind(kind);
return this;
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind, Capability... capabilities) {
this.innerModel().withKind(kind);
this.innerModel().withCapabilities(Arrays.asList(capabilities));
return this;
}
@Override
public CosmosDBAccountImpl withDataModelSql() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelMongoDB() {
this.innerModel().withKind(DatabaseAccountKind.MONGO_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelCassandra() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableCassandra"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Cassandra");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelAzureTable() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableTable"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Table");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelGremlin() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableGremlin"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Graph");
return this;
}
@Override
@Override
public CosmosDBAccountImpl withIpRules(List<IpAddressOrRange> ipRules) {
this.innerModel().withIpRules(ipRules);
return this;
}
@Override
protected Mono<DatabaseAccountGetResultsInner> getInnerAsync() {
return this.manager().serviceClient().getDatabaseAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public CosmosDBAccountImpl withWriteReplication(Region region) {
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withReadReplication(Region region) {
this.ensureFailoverIsInitialized();
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
failoverPolicyInner.withFailoverPriority(this.failoverPolicies.size());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withoutReadReplication(Region region) {
this.ensureFailoverIsInitialized();
for (int i = 1; i < this.failoverPolicies.size(); i++) {
if (this.failoverPolicies.get(i).locationName() != null) {
String locName = formatLocationName(this.failoverPolicies.get(i).locationName());
if (locName.equals(region.name())) {
this.failoverPolicies.remove(i);
}
}
}
return this;
}
@Override
public CosmosDBAccountImpl withEventualConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.EVENTUAL, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withSessionConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.SESSION, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withBoundedStalenessConsistency(long maxStalenessPrefix, int maxIntervalInSeconds) {
this.setConsistencyPolicy(DefaultConsistencyLevel.BOUNDED_STALENESS, maxStalenessPrefix, maxIntervalInSeconds);
return this;
}
@Override
public CosmosDBAccountImpl withStrongConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.STRONG, 0, 0);
return this;
}
@Override
public PrivateEndpointConnectionImpl defineNewPrivateEndpointConnection(String name) {
return this.privateEndpointConnections.define(name);
}
@Override
public PrivateEndpointConnectionImpl updatePrivateEndpointConnection(String name) {
return this.privateEndpointConnections.update(name);
}
@Override
public CosmosDBAccountImpl withoutPrivateEndpointConnection(String name) {
this.privateEndpointConnections.remove(name);
return this;
}
CosmosDBAccountImpl withPrivateEndpointConnection(PrivateEndpointConnectionImpl privateEndpointConnection) {
this.privateEndpointConnections.addPrivateEndpointConnection(privateEndpointConnection);
return this;
}
@Override
public Mono<CosmosDBAccount> createResourceAsync() {
return this.doDatabaseUpdateCreate();
}
private DatabaseAccountCreateUpdateParameters createUpdateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountCreateUpdateParameters createUpdateParametersInner = new DatabaseAccountCreateUpdateParameters();
createUpdateParametersInner.withLocation(this.regionName().toLowerCase(Locale.ROOT));
createUpdateParametersInner.withConsistencyPolicy(inner.consistencyPolicy());
createUpdateParametersInner.withIpRules(inner.ipRules());
createUpdateParametersInner.withKind(inner.kind());
createUpdateParametersInner.withCapabilities(inner.capabilities());
createUpdateParametersInner.withTags(inner.tags());
createUpdateParametersInner.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
this
.addLocationsForParameters(
new CreateUpdateLocationParameters(createUpdateParametersInner), this.failoverPolicies);
createUpdateParametersInner.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
createUpdateParametersInner.withEnableCassandraConnector(inner.enableCassandraConnector());
createUpdateParametersInner.withConnectorOffer(inner.connectorOffer());
createUpdateParametersInner.withEnableAutomaticFailover(inner.enableAutomaticFailover());
createUpdateParametersInner.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (this.virtualNetworkRulesMap != null) {
createUpdateParametersInner
.withVirtualNetworkRules(new ArrayList<VirtualNetworkRule>(this.virtualNetworkRulesMap.values()));
this.virtualNetworkRulesMap = null;
}
return createUpdateParametersInner;
}
private DatabaseAccountUpdateParameters updateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountUpdateParameters updateParameters = new DatabaseAccountUpdateParameters();
updateParameters.withTags(inner.tags());
updateParameters.withLocation(this.regionName().toLowerCase(Locale.ROOT));
updateParameters.withConsistencyPolicy(inner.consistencyPolicy());
updateParameters.withIpRules(inner.ipRules());
updateParameters.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
updateParameters.withEnableAutomaticFailover(inner.enableAutomaticFailover());
updateParameters.withCapabilities(inner.capabilities());
updateParameters.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
updateParameters.withEnableCassandraConnector(inner.enableCassandraConnector());
updateParameters.withConnectorOffer(inner.connectorOffer());
updateParameters.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (virtualNetworkRulesMap != null) {
updateParameters.withVirtualNetworkRules(new ArrayList<>(this.virtualNetworkRulesMap.values()));
virtualNetworkRulesMap = null;
}
this.addLocationsForParameters(new UpdateLocationParameters(updateParameters), this.failoverPolicies);
return updateParameters;
}
private static String fixDBName(String name) {
return name.toLowerCase(Locale.ROOT);
}
private void setConsistencyPolicy(
DefaultConsistencyLevel level, long maxStalenessPrefix, int maxIntervalInSeconds) {
ConsistencyPolicy policy = new ConsistencyPolicy();
policy.withDefaultConsistencyLevel(level);
if (level == DefaultConsistencyLevel.BOUNDED_STALENESS) {
policy.withMaxStalenessPrefix(maxStalenessPrefix);
policy.withMaxIntervalInSeconds(maxIntervalInSeconds);
}
this.innerModel().withConsistencyPolicy(policy);
}
private void addLocationsForParameters(HasLocations locationParameters, List<FailoverPolicy> failoverPolicies) {
List<Location> locations = new ArrayList<Location>();
if (failoverPolicies.size() > 0) {
for (int i = 0; i < failoverPolicies.size(); i++) {
FailoverPolicy policyInner = failoverPolicies.get(i);
Location location = new Location();
location.withFailoverPriority(i);
location.withLocationName(policyInner.locationName());
locations.add(location);
}
} else {
Location location = new Location();
location.withFailoverPriority(0);
location.withLocationName(locationParameters.location());
locations.add(location);
}
locationParameters.withLocations(locations);
}
private static String formatLocationName(String locationName) {
return locationName.replace(" ", "").toLowerCase(Locale.ROOT);
}
private Mono<CosmosDBAccount> doDatabaseUpdateCreate() {
final CosmosDBAccountImpl self = this;
final List<Integer> data = new ArrayList<Integer>();
data.add(0);
Mono<DatabaseAccountGetResultsInner> request = null;
HasLocations locationParameters = null;
if (isInCreateMode()) {
final DatabaseAccountCreateUpdateParameters createUpdateParametersInner =
this.createUpdateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.createOrUpdateAsync(resourceGroupName(), name(), createUpdateParametersInner);
locationParameters = new CreateUpdateLocationParameters(createUpdateParametersInner);
} else {
final DatabaseAccountUpdateParameters updateParametersInner = this.updateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.updateAsync(resourceGroupName(), name(), updateParametersInner);
locationParameters = new UpdateLocationParameters(updateParametersInner);
}
Set<String> locations = locationParameters.locations().stream()
.map(location -> formatLocationName(location.locationName()))
.collect(Collectors.toSet());
return request
.flatMap(
databaseAccountInner -> {
self.failoverPolicies.clear();
self.hasFailoverPolicyChanges = false;
return manager()
.databaseAccounts()
.getByResourceGroupAsync(resourceGroupName(), name())
.flatMap(
databaseAccount -> {
if (MAX_DELAY_DUE_TO_MISSING_FAILOVERS > data.get(0)
&& (databaseAccount.id() == null
|| databaseAccount.id().length() == 0
|| locations.size()
!= databaseAccount.innerModel().failoverPolicies().size())) {
return Mono.empty();
}
if (isAFinalProvisioningState(databaseAccount.innerModel().provisioningState())) {
for (Location location : databaseAccount.readableReplications()) {
if (!isAFinalProvisioningState(location.provisioningState())) {
return Mono.empty();
}
if (!locations.contains(formatLocationName(location.locationName()))) {
return Mono.empty();
}
}
} else {
return Mono.empty();
}
self.setInner(databaseAccount.innerModel());
return Mono.just(databaseAccount);
})
.repeatWhenEmpty(
longFlux ->
longFlux
.flatMap(
index -> {
data.set(0, data.get(0) + 30);
return Mono.delay(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(
manager().serviceClient().getDefaultPollInterval()));
}));
});
}
private void ensureFailoverIsInitialized() {
if (this.isInCreateMode()) {
return;
}
if (!this.hasFailoverPolicyChanges) {
this.failoverPolicies.clear();
FailoverPolicy[] policyInners = new FailoverPolicy[this.innerModel().failoverPolicies().size()];
this.innerModel().failoverPolicies().toArray(policyInners);
Arrays
.sort(
policyInners,
Comparator.comparing(FailoverPolicy::failoverPriority));
for (int i = 0; i < policyInners.length; i++) {
this.failoverPolicies.add(policyInners[i]);
}
this.hasFailoverPolicyChanges = true;
}
}
private boolean isAFinalProvisioningState(String state) {
switch (state.toLowerCase(Locale.ROOT)) {
case "succeeded":
case "canceled":
case "failed":
return true;
default:
return false;
}
}
private Map<String, VirtualNetworkRule> ensureVirtualNetworkRules() {
if (this.virtualNetworkRulesMap == null) {
this.virtualNetworkRulesMap = new HashMap<>();
if (this.innerModel() != null && this.innerModel().virtualNetworkRules() != null) {
for (VirtualNetworkRule virtualNetworkRule : this.innerModel().virtualNetworkRules()) {
this.virtualNetworkRulesMap.put(virtualNetworkRule.id(), virtualNetworkRule);
}
}
}
return this.virtualNetworkRulesMap;
}
@Override
public CosmosDBAccountImpl withVirtualNetwork(String virtualNetworkId, String subnetName) {
this.innerModel().withIsVirtualNetworkFilterEnabled(true);
String vnetId = virtualNetworkId + "/subnets/" + subnetName;
ensureVirtualNetworkRules().put(vnetId, new VirtualNetworkRule().withId(vnetId));
return this;
}
@Override
public CosmosDBAccountImpl withoutVirtualNetwork(String virtualNetworkId, String subnetName) {
Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
vnetRules.remove(virtualNetworkId + "/subnets/" + subnetName);
if (vnetRules.size() == 0) {
this.innerModel().withIsVirtualNetworkFilterEnabled(false);
}
return this;
}
@Override
public CosmosDBAccountImpl withVirtualNetworkRules(List<VirtualNetworkRule> virtualNetworkRules) {
Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
if (virtualNetworkRules == null || virtualNetworkRules.isEmpty()) {
vnetRules.clear();
this.innerModel().withIsVirtualNetworkFilterEnabled(false);
return this;
}
this.innerModel().withIsVirtualNetworkFilterEnabled(true);
for (VirtualNetworkRule vnetRule : virtualNetworkRules) {
this.virtualNetworkRulesMap.put(vnetRule.id(), vnetRule);
}
return this;
}
@Override
public CosmosDBAccountImpl withMultipleWriteLocationsEnabled(boolean enabled) {
this.innerModel().withEnableMultipleWriteLocations(enabled);
return this;
}
@Override
public CosmosDBAccountImpl withCassandraConnector(ConnectorOffer connectorOffer) {
this.innerModel().withEnableCassandraConnector(true);
this.innerModel().withConnectorOffer(connectorOffer);
return this;
}
@Override
public CosmosDBAccountImpl withoutCassandraConnector() {
this.innerModel().withEnableCassandraConnector(false);
this.innerModel().withConnectorOffer(null);
return this;
}
@Override
public CosmosDBAccountImpl withDisableKeyBaseMetadataWriteAccess(boolean disabled) {
this.innerModel().withDisableKeyBasedMetadataWriteAccess(disabled);
return this;
}
interface HasLocations {
String location();
List<Location> locations();
void withLocations(List<Location> locations);
}
static class CreateUpdateLocationParameters implements HasLocations {
private DatabaseAccountCreateUpdateParameters parameters;
CreateUpdateLocationParameters(DatabaseAccountCreateUpdateParameters parametersObject) {
parameters = parametersObject;
}
@Override
public String location() {
return parameters.location();
}
@Override
public List<Location> locations() {
return parameters.locations();
}
@Override
public void withLocations(List<Location> locations) {
parameters.withLocations(locations);
}
}
static class UpdateLocationParameters implements HasLocations {
private DatabaseAccountUpdateParameters parameters;
UpdateLocationParameters(DatabaseAccountUpdateParameters parametersObject) {
parameters = parametersObject;
}
@Override
public String location() {
return parameters.location();
}
@Override
public List<Location> locations() {
return parameters.locations();
}
@Override
public void withLocations(List<Location> locations) {
parameters.withLocations(locations);
}
}
} |
Collections.unmodifiableList? | public List<IpAddressOrRange> ipRules() {
return this.innerModel().ipRules();
} | return this.innerModel().ipRules(); | public List<IpAddressOrRange> ipRules() {
return Collections.unmodifiableList(this.innerModel().ipRules());
} | class CosmosDBAccountImpl
extends GroupableResourceImpl<CosmosDBAccount, DatabaseAccountGetResultsInner, CosmosDBAccountImpl, CosmosManager>
implements CosmosDBAccount, CosmosDBAccount.Definition, CosmosDBAccount.Update {
private List<FailoverPolicy> failoverPolicies;
private boolean hasFailoverPolicyChanges;
private static final int MAX_DELAY_DUE_TO_MISSING_FAILOVERS = 60 * 10;
private Map<String, VirtualNetworkRule> virtualNetworkRulesMap;
private PrivateEndpointConnectionsImpl privateEndpointConnections;
CosmosDBAccountImpl(String name, DatabaseAccountGetResultsInner innerObject, CosmosManager manager) {
super(fixDBName(name), innerObject, manager);
this.failoverPolicies = new ArrayList<>();
this.privateEndpointConnections =
new PrivateEndpointConnectionsImpl(this.manager().serviceClient().getPrivateEndpointConnections(), this);
}
@Override
public DatabaseAccountKind kind() {
return this.innerModel().kind();
}
@Override
public String documentEndpoint() {
return this.innerModel().documentEndpoint();
}
@Override
public DatabaseAccountOfferType databaseAccountOfferType() {
return this.innerModel().databaseAccountOfferType();
}
@Override
public String ipRangeFilter() {
return this.ipRules().stream().map(IpAddressOrRange::ipAddressOrRange).collect(Collectors.joining(","));
}
@Override
@Override
public ConsistencyPolicy consistencyPolicy() {
return this.innerModel().consistencyPolicy();
}
@Override
public DefaultConsistencyLevel defaultConsistencyLevel() {
if (this.innerModel().consistencyPolicy() == null) {
throw new RuntimeException("Consistency policy is missing!");
}
return this.innerModel().consistencyPolicy().defaultConsistencyLevel();
}
@Override
public List<Location> writableReplications() {
return this.innerModel().writeLocations();
}
@Override
public List<Location> readableReplications() {
return this.innerModel().readLocations();
}
@Override
public DatabaseAccountListKeysResult listKeys() {
return this.listKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListKeysResult> listKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListKeysResultImpl::new);
}
@Override
public DatabaseAccountListReadOnlyKeysResult listReadOnlyKeys() {
return this.listReadOnlyKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListReadOnlyKeysResult> listReadOnlyKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listReadOnlyKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListReadOnlyKeysResultImpl::new);
}
@Override
public DatabaseAccountListConnectionStringsResult listConnectionStrings() {
return this.listConnectionStringsAsync().block();
}
@Override
public Mono<DatabaseAccountListConnectionStringsResult> listConnectionStringsAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listConnectionStringsAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListConnectionStringsResultImpl::new);
}
@Override
public List<SqlDatabase> listSqlDatabases() {
return this.listSqlDatabasesAsync().collectList().block();
}
@Override
public PagedFlux<SqlDatabase> listSqlDatabasesAsync() {
return this
.manager()
.serviceClient()
.getSqlResources()
.listSqlDatabasesAsync(this.resourceGroupName(), this.name())
.mapPage(SqlDatabaseImpl::new);
}
@Override
public List<PrivateLinkResource> listPrivateLinkResources() {
return this.listPrivateLinkResourcesAsync().collectList().block();
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.listByDatabaseAccountAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public PrivateLinkResource getPrivateLinkResource(String groupName) {
return this.getPrivateLinkResourceAsync(groupName).block();
}
@Override
public Mono<PrivateLinkResource> getPrivateLinkResourceAsync(String groupName) {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.getAsync(this.resourceGroupName(), this.name(), groupName)
.map(PrivateLinkResourceImpl::new);
}
@Override
public Map<String, PrivateEndpointConnection> listPrivateEndpointConnection() {
return this.listPrivateEndpointConnectionAsync().block();
}
@Override
public Mono<Map<String, PrivateEndpointConnection>> listPrivateEndpointConnectionAsync() {
return this.privateEndpointConnections.asMapAsync();
}
@Override
public PrivateEndpointConnection getPrivateEndpointConnection(String name) {
return this.getPrivateEndpointConnectionAsync(name).block();
}
@Override
public Mono<PrivateEndpointConnection> getPrivateEndpointConnectionAsync(String name) {
return this
.privateEndpointConnections
.getImplAsync(name)
.map(privateEndpointConnection -> privateEndpointConnection);
}
@Override
public boolean multipleWriteLocationsEnabled() {
    // The inner model exposes a Boolean; treat null (flag never set by the
    // service) as false instead of throwing an unboxing NullPointerException.
    return Boolean.TRUE.equals(this.innerModel().enableMultipleWriteLocations());
}
@Override
public boolean cassandraConnectorEnabled() {
    // Null-safe unboxing: a missing flag on the inner model means "disabled"
    // rather than a NullPointerException.
    return Boolean.TRUE.equals(this.innerModel().enableCassandraConnector());
}
@Override
public ConnectorOffer cassandraConnectorOffer() {
return this.innerModel().connectorOffer();
}
@Override
public boolean keyBasedMetadataWriteAccessDisabled() {
    // Null-safe unboxing: absent flag on the inner model means "not disabled"
    // rather than a NullPointerException.
    return Boolean.TRUE.equals(this.innerModel().disableKeyBasedMetadataWriteAccess());
}
@Override
public List<Capability> capabilities() {
    // Read-only view; never null, even when the inner model has no capabilities.
    List<Capability> inner = this.innerModel().capabilities();
    return Collections.unmodifiableList(inner != null ? inner : new ArrayList<>());
}
@Override
public List<VirtualNetworkRule> virtualNetworkRules() {
    // Read-only view of the inner model's rules; empty list when absent.
    if (this.innerModel() == null || this.innerModel().virtualNetworkRules() == null) {
        return Collections.unmodifiableList(new ArrayList<VirtualNetworkRule>());
    }
    return Collections.unmodifiableList(this.innerModel().virtualNetworkRules());
}
@Override
public void offlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().offlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> offlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.offlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void onlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().onlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> onlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.onlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void regenerateKey(KeyKind keyKind) {
this.manager().serviceClient().getDatabaseAccounts().regenerateKey(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public Mono<Void> regenerateKeyAsync(KeyKind keyKind) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind) {
this.innerModel().withKind(kind);
return this;
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind, Capability... capabilities) {
this.innerModel().withKind(kind);
this.innerModel().withCapabilities(Arrays.asList(capabilities));
return this;
}
@Override
public CosmosDBAccountImpl withDataModelSql() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelMongoDB() {
this.innerModel().withKind(DatabaseAccountKind.MONGO_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelCassandra() {
    // Cassandra API = GlobalDocumentDB kind + the EnableCassandra capability,
    // plus the portal's "defaultExperience" tag.
    this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
    this.innerModel().withCapabilities(
        new ArrayList<>(Arrays.asList(new Capability().withName("EnableCassandra"))));
    this.withTag("defaultExperience", "Cassandra");
    return this;
}
@Override
public CosmosDBAccountImpl withDataModelAzureTable() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableTable"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Table");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelGremlin() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableGremlin"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Graph");
return this;
}
@Override
public CosmosDBAccountImpl withIpRangeFilter(String ipRangeFilter) {
    // Split the comma-separated filter into individual IP rules. A null or
    // empty filter clears the rules instead of throwing a
    // NullPointerException — consistent with the guarded variant of this
    // method elsewhere in this file.
    List<IpAddressOrRange> rules = new ArrayList<>();
    if (ipRangeFilter != null && !ipRangeFilter.isEmpty()) {
        for (String ip : ipRangeFilter.split(",")) {
            rules.add(new IpAddressOrRange().withIpAddressOrRange(ip));
        }
    }
    this.innerModel().withIpRules(rules);
    return this;
}
@Override
public CosmosDBAccountImpl withIpRules(List<IpAddressOrRange> ipRules) {
this.innerModel().withIpRules(ipRules);
return this;
}
@Override
protected Mono<DatabaseAccountGetResultsInner> getInnerAsync() {
return this.manager().serviceClient().getDatabaseAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public CosmosDBAccountImpl withWriteReplication(Region region) {
    // Registers the write (primary) region for the account.
    // NOTE(review): unlike withReadReplication, no failoverPriority is set
    // here — presumably the write region is expected to be added first so it
    // lands at priority 0 via list order in addLocationsForParameters;
    // confirm that callers always add it before read replicas.
    FailoverPolicy failoverPolicyInner = new FailoverPolicy();
    failoverPolicyInner.withLocationName(region.name());
    this.hasFailoverPolicyChanges = true;
    this.failoverPolicies.add(failoverPolicyInner);
    return this;
}
@Override
public CosmosDBAccountImpl withReadReplication(Region region) {
this.ensureFailoverIsInitialized();
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
failoverPolicyInner.withFailoverPriority(this.failoverPolicies.size());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withoutReadReplication(Region region) {
    // Removes every read region (index >= 1; index 0 is the write region)
    // whose normalized location name matches the given region.
    //
    // Fix: the previous index-based loop removed while incrementing i, which
    // skipped the element immediately after each removal (and only compared,
    // never removed, consecutive duplicates). removeIf on the sub-list view
    // writes through to the backing list and removes all matches.
    this.ensureFailoverIsInitialized();
    if (!this.failoverPolicies.isEmpty()) {
        this.failoverPolicies.subList(1, this.failoverPolicies.size())
            .removeIf(policy -> policy.locationName() != null
                && formatLocationName(policy.locationName()).equals(region.name()));
    }
    return this;
}
@Override
public CosmosDBAccountImpl withEventualConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.EVENTUAL, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withSessionConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.SESSION, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withBoundedStalenessConsistency(long maxStalenessPrefix, int maxIntervalInSeconds) {
this.setConsistencyPolicy(DefaultConsistencyLevel.BOUNDED_STALENESS, maxStalenessPrefix, maxIntervalInSeconds);
return this;
}
@Override
public CosmosDBAccountImpl withStrongConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.STRONG, 0, 0);
return this;
}
@Override
public PrivateEndpointConnectionImpl defineNewPrivateEndpointConnection(String name) {
return this.privateEndpointConnections.define(name);
}
@Override
public PrivateEndpointConnectionImpl updatePrivateEndpointConnection(String name) {
return this.privateEndpointConnections.update(name);
}
@Override
public CosmosDBAccountImpl withoutPrivateEndpointConnection(String name) {
this.privateEndpointConnections.remove(name);
return this;
}
CosmosDBAccountImpl withPrivateEndpointConnection(PrivateEndpointConnectionImpl privateEndpointConnection) {
this.privateEndpointConnections.addPrivateEndpointConnection(privateEndpointConnection);
return this;
}
@Override
public Mono<CosmosDBAccount> createResourceAsync() {
return this.doDatabaseUpdateCreate();
}
private DatabaseAccountCreateUpdateParameters createUpdateParametersInner(DatabaseAccountGetResultsInner inner) {
    // Builds the payload for the create (PUT) flow by copying every mutable
    // setting off the current inner model.
    this.ensureFailoverIsInitialized();
    DatabaseAccountCreateUpdateParameters createUpdateParametersInner = new DatabaseAccountCreateUpdateParameters();
    // ARM location strings are compared case-insensitively; normalize with a
    // fixed locale so behavior does not depend on the default JVM locale.
    createUpdateParametersInner.withLocation(this.regionName().toLowerCase(Locale.ROOT));
    createUpdateParametersInner.withConsistencyPolicy(inner.consistencyPolicy());
    createUpdateParametersInner.withIpRules(inner.ipRules());
    createUpdateParametersInner.withKind(inner.kind());
    createUpdateParametersInner.withCapabilities(inner.capabilities());
    createUpdateParametersInner.withTags(inner.tags());
    createUpdateParametersInner.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
    // Locations are derived from the accumulated failover policies (or the
    // primary region when none were specified).
    this
        .addLocationsForParameters(
            new CreateUpdateLocationParameters(createUpdateParametersInner), this.failoverPolicies);
    createUpdateParametersInner.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
    createUpdateParametersInner.withEnableCassandraConnector(inner.enableCassandraConnector());
    createUpdateParametersInner.withConnectorOffer(inner.connectorOffer());
    createUpdateParametersInner.withEnableAutomaticFailover(inner.enableAutomaticFailover());
    createUpdateParametersInner.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
    // Virtual network rules are staged in a map by the with*VirtualNetwork*
    // methods; flush them into the payload and reset the staging map.
    if (this.virtualNetworkRulesMap != null) {
        createUpdateParametersInner
            .withVirtualNetworkRules(new ArrayList<VirtualNetworkRule>(this.virtualNetworkRulesMap.values()));
        this.virtualNetworkRulesMap = null;
    }
    return createUpdateParametersInner;
}
private DatabaseAccountUpdateParameters updateParametersInner(DatabaseAccountGetResultsInner inner) {
    // Builds the payload for the update (PATCH) flow; mirrors
    // createUpdateParametersInner but targets the update parameter type.
    this.ensureFailoverIsInitialized();
    DatabaseAccountUpdateParameters updateParameters = new DatabaseAccountUpdateParameters();
    updateParameters.withTags(inner.tags());
    // Fixed locale keeps the normalization independent of the JVM default.
    updateParameters.withLocation(this.regionName().toLowerCase(Locale.ROOT));
    updateParameters.withConsistencyPolicy(inner.consistencyPolicy());
    updateParameters.withIpRules(inner.ipRules());
    updateParameters.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
    updateParameters.withEnableAutomaticFailover(inner.enableAutomaticFailover());
    updateParameters.withCapabilities(inner.capabilities());
    updateParameters.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
    updateParameters.withEnableCassandraConnector(inner.enableCassandraConnector());
    updateParameters.withConnectorOffer(inner.connectorOffer());
    updateParameters.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
    // Flush staged virtual network rules into the payload and reset the stage.
    if (virtualNetworkRulesMap != null) {
        updateParameters.withVirtualNetworkRules(new ArrayList<>(this.virtualNetworkRulesMap.values()));
        virtualNetworkRulesMap = null;
    }
    this.addLocationsForParameters(new UpdateLocationParameters(updateParameters), this.failoverPolicies);
    return updateParameters;
}
private static String fixDBName(String rawName) {
    // Account names are case-insensitive; normalize with a fixed locale so
    // the result does not depend on the default JVM locale.
    return rawName.toLowerCase(Locale.ROOT);
}
private void setConsistencyPolicy(
    DefaultConsistencyLevel level, long maxStalenessPrefix, int maxIntervalInSeconds) {
    // Installs a fresh consistency policy on the inner model. The staleness
    // bounds only apply to BOUNDED_STALENESS; other levels ignore them.
    ConsistencyPolicy policy = new ConsistencyPolicy();
    policy.withDefaultConsistencyLevel(level);
    if (DefaultConsistencyLevel.BOUNDED_STALENESS.equals(level)) {
        policy.withMaxStalenessPrefix(maxStalenessPrefix);
        policy.withMaxIntervalInSeconds(maxIntervalInSeconds);
    }
    this.innerModel().withConsistencyPolicy(policy);
}
private void addLocationsForParameters(HasLocations locationParameters, List<FailoverPolicy> failoverPolicies) {
    // Converts the failover policies into Location entries whose priority is
    // their list position; falls back to a single priority-0 entry at the
    // primary location when no policies were configured.
    List<Location> locations = new ArrayList<>();
    if (failoverPolicies.isEmpty()) {
        Location primary = new Location();
        primary.withFailoverPriority(0);
        primary.withLocationName(locationParameters.location());
        locations.add(primary);
    } else {
        int priority = 0;
        for (FailoverPolicy policy : failoverPolicies) {
            Location location = new Location();
            location.withFailoverPriority(priority++);
            location.withLocationName(policy.locationName());
            locations.add(location);
        }
    }
    locationParameters.withLocations(locations);
}
private static String formatLocationName(String displayName) {
    // Normalizes an ARM display name (e.g. "West US") to its canonical form
    // ("westus"): strip spaces, lower-case with a fixed locale.
    return displayName.replace(" ", "").toLowerCase(Locale.ROOT);
}
private Mono<CosmosDBAccount> doDatabaseUpdateCreate() {
    // Issues the create-or-update request, then polls the account until the
    // service reports a final provisioning state AND all requested locations
    // are present. The single-element `data` list is a mutable elapsed-time
    // counter captured by the lambdas below (lambdas need effectively-final
    // variables, hence the list wrapper).
    final CosmosDBAccountImpl self = this;
    final List<Integer> data = new ArrayList<Integer>();
    data.add(0);
    Mono<DatabaseAccountGetResultsInner> request = null;
    HasLocations locationParameters = null;
    if (isInCreateMode()) {
        final DatabaseAccountCreateUpdateParameters createUpdateParametersInner =
            this.createUpdateParametersInner(this.innerModel());
        request =
            this
                .manager()
                .serviceClient()
                .getDatabaseAccounts()
                .createOrUpdateAsync(resourceGroupName(), name(), createUpdateParametersInner);
        locationParameters = new CreateUpdateLocationParameters(createUpdateParametersInner);
    } else {
        final DatabaseAccountUpdateParameters updateParametersInner = this.updateParametersInner(this.innerModel());
        request =
            this
                .manager()
                .serviceClient()
                .getDatabaseAccounts()
                .updateAsync(resourceGroupName(), name(), updateParametersInner);
        locationParameters = new UpdateLocationParameters(updateParametersInner);
    }
    // Normalized names of every location we asked for; used to detect when
    // the service has caught up with the request.
    Set<String> locations = locationParameters.locations().stream()
        .map(location -> formatLocationName(location.locationName()))
        .collect(Collectors.toSet());
    return request
        .flatMap(
            databaseAccountInner -> {
                // The request consumed the staged failover policies.
                self.failoverPolicies.clear();
                self.hasFailoverPolicyChanges = false;
                return manager()
                    .databaseAccounts()
                    .getByResourceGroupAsync(resourceGroupName(), name())
                    .flatMap(
                        databaseAccount -> {
                            // Until MAX_DELAY_DUE_TO_MISSING_FAILOVERS seconds
                            // have elapsed, keep waiting while the account has
                            // no id yet or the failover-policy count has not
                            // reached the requested location count. Mono.empty()
                            // triggers repeatWhenEmpty below (i.e. retry).
                            if (MAX_DELAY_DUE_TO_MISSING_FAILOVERS > data.get(0)
                                && (databaseAccount.id() == null
                                    || databaseAccount.id().length() == 0
                                    || locations.size()
                                        != databaseAccount.innerModel().failoverPolicies().size())) {
                                return Mono.empty();
                            }
                            if (isAFinalProvisioningState(databaseAccount.innerModel().provisioningState())) {
                                // Every readable replica must also be in a
                                // final state and be one we requested.
                                for (Location location : databaseAccount.readableReplications()) {
                                    if (!isAFinalProvisioningState(location.provisioningState())) {
                                        return Mono.empty();
                                    }
                                    if (!locations.contains(formatLocationName(location.locationName()))) {
                                        return Mono.empty();
                                    }
                                }
                            } else {
                                return Mono.empty();
                            }
                            // Done: adopt the freshly fetched inner model.
                            self.setInner(databaseAccount.innerModel());
                            return Mono.just(databaseAccount);
                        })
                    .repeatWhenEmpty(
                        longFlux ->
                            longFlux
                                .flatMap(
                                    index -> {
                                        // Each retry accounts for ~30s of wall
                                        // time, then delays by the configured
                                        // poll interval before re-fetching.
                                        data.set(0, data.get(0) + 30);
                                        return Mono.delay(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(
                                            manager().serviceClient().getDefaultPollInterval()));
                                    }));
            });
}
private void ensureFailoverIsInitialized() {
    // Seeds the local failover-policy list from the inner model exactly once
    // per update cycle (no-op in create mode, where there is nothing to seed).
    if (this.isInCreateMode()) {
        return;
    }
    if (this.hasFailoverPolicyChanges) {
        return;
    }
    this.failoverPolicies.clear();
    List<FailoverPolicy> sorted = new ArrayList<>(this.innerModel().failoverPolicies());
    sorted.sort(Comparator.comparing(FailoverPolicy::failoverPriority));
    this.failoverPolicies.addAll(sorted);
    this.hasFailoverPolicyChanges = true;
}
private boolean isAFinalProvisioningState(String state) {
    // Terminal states for an ARM operation; anything else means the
    // operation is still in flight.
    String normalized = state.toLowerCase(Locale.ROOT);
    return "succeeded".equals(normalized)
        || "canceled".equals(normalized)
        || "failed".equals(normalized);
}
private Map<String, VirtualNetworkRule> ensureVirtualNetworkRules() {
    // Lazily snapshots the inner model's virtual network rules into a map
    // keyed by rule id; subsequent calls return the same staged map.
    if (this.virtualNetworkRulesMap != null) {
        return this.virtualNetworkRulesMap;
    }
    this.virtualNetworkRulesMap = new HashMap<>();
    if (this.innerModel() != null && this.innerModel().virtualNetworkRules() != null) {
        for (VirtualNetworkRule rule : this.innerModel().virtualNetworkRules()) {
            this.virtualNetworkRulesMap.put(rule.id(), rule);
        }
    }
    return this.virtualNetworkRulesMap;
}
@Override
public CosmosDBAccountImpl withVirtualNetwork(String virtualNetworkId, String subnetName) {
this.innerModel().withIsVirtualNetworkFilterEnabled(true);
String vnetId = virtualNetworkId + "/subnets/" + subnetName;
ensureVirtualNetworkRules().put(vnetId, new VirtualNetworkRule().withId(vnetId));
return this;
}
@Override
public CosmosDBAccountImpl withoutVirtualNetwork(String virtualNetworkId, String subnetName) {
    // Removes the rule for this subnet; when the last rule goes away,
    // virtual network filtering is switched off entirely.
    Map<String, VirtualNetworkRule> rules = ensureVirtualNetworkRules();
    rules.remove(virtualNetworkId + "/subnets/" + subnetName);
    if (rules.isEmpty()) {
        this.innerModel().withIsVirtualNetworkFilterEnabled(false);
    }
    return this;
}
@Override
public CosmosDBAccountImpl withVirtualNetworkRules(List<VirtualNetworkRule> virtualNetworkRules) {
    // A null or empty argument clears all rules and disables filtering;
    // otherwise the given rules are merged into the staged map by id.
    Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
    if (virtualNetworkRules == null || virtualNetworkRules.isEmpty()) {
        vnetRules.clear();
        this.innerModel().withIsVirtualNetworkFilterEnabled(false);
    } else {
        this.innerModel().withIsVirtualNetworkFilterEnabled(true);
        for (VirtualNetworkRule rule : virtualNetworkRules) {
            vnetRules.put(rule.id(), rule);
        }
    }
    return this;
}
@Override
public CosmosDBAccountImpl withMultipleWriteLocationsEnabled(boolean enabled) {
this.innerModel().withEnableMultipleWriteLocations(enabled);
return this;
}
@Override
public CosmosDBAccountImpl withCassandraConnector(ConnectorOffer connectorOffer) {
this.innerModel().withEnableCassandraConnector(true);
this.innerModel().withConnectorOffer(connectorOffer);
return this;
}
@Override
public CosmosDBAccountImpl withoutCassandraConnector() {
this.innerModel().withEnableCassandraConnector(false);
this.innerModel().withConnectorOffer(null);
return this;
}
@Override
public CosmosDBAccountImpl withDisableKeyBaseMetadataWriteAccess(boolean disabled) {
this.innerModel().withDisableKeyBasedMetadataWriteAccess(disabled);
return this;
}
// Common view over the create and the update parameter objects so that
// location handling (addLocationsForParameters) can be shared by both flows.
interface HasLocations {
    // Primary (write) location of the account.
    String location();
    // Currently configured locations, in failover-priority order.
    List<Location> locations();
    // Replaces the configured locations.
    void withLocations(List<Location> locations);
}
/** Adapts {@link DatabaseAccountCreateUpdateParameters} to the {@link HasLocations} view. */
static class CreateUpdateLocationParameters implements HasLocations {
    private final DatabaseAccountCreateUpdateParameters parameters;

    CreateUpdateLocationParameters(DatabaseAccountCreateUpdateParameters parametersObject) {
        this.parameters = parametersObject;
    }

    @Override
    public String location() {
        return this.parameters.location();
    }

    @Override
    public List<Location> locations() {
        return this.parameters.locations();
    }

    @Override
    public void withLocations(List<Location> locations) {
        this.parameters.withLocations(locations);
    }
}
/** Adapts {@link DatabaseAccountUpdateParameters} to the {@link HasLocations} view. */
static class UpdateLocationParameters implements HasLocations {
    private final DatabaseAccountUpdateParameters parameters;

    UpdateLocationParameters(DatabaseAccountUpdateParameters parametersObject) {
        this.parameters = parametersObject;
    }

    @Override
    public String location() {
        return this.parameters.location();
    }

    @Override
    public List<Location> locations() {
        return this.parameters.locations();
    }

    @Override
    public void withLocations(List<Location> locations) {
        this.parameters.withLocations(locations);
    }
}
} | class CosmosDBAccountImpl
extends GroupableResourceImpl<CosmosDBAccount, DatabaseAccountGetResultsInner, CosmosDBAccountImpl, CosmosManager>
implements CosmosDBAccount, CosmosDBAccount.Definition, CosmosDBAccount.Update {
private List<FailoverPolicy> failoverPolicies;
private boolean hasFailoverPolicyChanges;
private static final int MAX_DELAY_DUE_TO_MISSING_FAILOVERS = 60 * 10;
private Map<String, VirtualNetworkRule> virtualNetworkRulesMap;
private PrivateEndpointConnectionsImpl privateEndpointConnections;
// Creates the fluent wrapper over an account's inner model. Account names
// are case-insensitive, so the name is normalized (fixDBName) before being
// handed to the base class.
CosmosDBAccountImpl(String name, DatabaseAccountGetResultsInner innerObject, CosmosManager manager) {
    super(fixDBName(name), innerObject, manager);
    this.failoverPolicies = new ArrayList<>();
    // Child-resource collection backing the private endpoint connection APIs.
    this.privateEndpointConnections =
        new PrivateEndpointConnectionsImpl(this.manager().serviceClient().getPrivateEndpointConnections(), this);
}
@Override
public DatabaseAccountKind kind() {
return this.innerModel().kind();
}
@Override
public String documentEndpoint() {
return this.innerModel().documentEndpoint();
}
@Override
public DatabaseAccountOfferType databaseAccountOfferType() {
return this.innerModel().databaseAccountOfferType();
}
@Override
public String ipRangeFilter() {
if (CoreUtils.isNullOrEmpty(ipRules())) {
return null;
}
return this.ipRules().stream().map(IpAddressOrRange::ipAddressOrRange).collect(Collectors.joining(","));
}
// Fix: the annotation was duplicated (`@Override` twice in a row), which is
// a compile error — @Override is not a repeatable annotation.
@Override
public ConsistencyPolicy consistencyPolicy() {
    // Raw consistency policy from the inner model (may be null on a
    // freshly-created wrapper).
    return this.innerModel().consistencyPolicy();
}
@Override
public DefaultConsistencyLevel defaultConsistencyLevel() {
    // IllegalStateException is a RuntimeException, so existing callers that
    // catch RuntimeException keep working, but the failure mode is now the
    // idiomatic, descriptive type instead of a raw RuntimeException.
    if (this.innerModel().consistencyPolicy() == null) {
        throw new IllegalStateException("Consistency policy is missing!");
    }
    return this.innerModel().consistencyPolicy().defaultConsistencyLevel();
}
@Override
public List<Location> writableReplications() {
return this.innerModel().writeLocations();
}
@Override
public List<Location> readableReplications() {
return this.innerModel().readLocations();
}
@Override
public DatabaseAccountListKeysResult listKeys() {
return this.listKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListKeysResult> listKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListKeysResultImpl::new);
}
@Override
public DatabaseAccountListReadOnlyKeysResult listReadOnlyKeys() {
return this.listReadOnlyKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListReadOnlyKeysResult> listReadOnlyKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listReadOnlyKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListReadOnlyKeysResultImpl::new);
}
@Override
public DatabaseAccountListConnectionStringsResult listConnectionStrings() {
return this.listConnectionStringsAsync().block();
}
@Override
public Mono<DatabaseAccountListConnectionStringsResult> listConnectionStringsAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listConnectionStringsAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListConnectionStringsResultImpl::new);
}
@Override
public List<SqlDatabase> listSqlDatabases() {
return this.listSqlDatabasesAsync().collectList().block();
}
@Override
public PagedFlux<SqlDatabase> listSqlDatabasesAsync() {
return this
.manager()
.serviceClient()
.getSqlResources()
.listSqlDatabasesAsync(this.resourceGroupName(), this.name())
.mapPage(SqlDatabaseImpl::new);
}
@Override
public List<PrivateLinkResource> listPrivateLinkResources() {
return this.listPrivateLinkResourcesAsync().collectList().block();
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.listByDatabaseAccountAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public PrivateLinkResource getPrivateLinkResource(String groupName) {
return this.getPrivateLinkResourceAsync(groupName).block();
}
@Override
public Mono<PrivateLinkResource> getPrivateLinkResourceAsync(String groupName) {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.getAsync(this.resourceGroupName(), this.name(), groupName)
.map(PrivateLinkResourceImpl::new);
}
@Override
public Map<String, PrivateEndpointConnection> listPrivateEndpointConnection() {
return this.listPrivateEndpointConnectionAsync().block();
}
@Override
public Mono<Map<String, PrivateEndpointConnection>> listPrivateEndpointConnectionAsync() {
return this.privateEndpointConnections.asMapAsync();
}
@Override
public PrivateEndpointConnection getPrivateEndpointConnection(String name) {
return this.getPrivateEndpointConnectionAsync(name).block();
}
@Override
public Mono<PrivateEndpointConnection> getPrivateEndpointConnectionAsync(String name) {
return this
.privateEndpointConnections
.getImplAsync(name)
.map(privateEndpointConnection -> privateEndpointConnection);
}
@Override
public boolean multipleWriteLocationsEnabled() {
    // Null-safe unboxing: a missing Boolean flag on the inner model means
    // "disabled" rather than a NullPointerException.
    return Boolean.TRUE.equals(this.innerModel().enableMultipleWriteLocations());
}
@Override
public boolean cassandraConnectorEnabled() {
    // Null-safe unboxing: a missing flag on the inner model means "disabled"
    // rather than a NullPointerException.
    return Boolean.TRUE.equals(this.innerModel().enableCassandraConnector());
}
@Override
public ConnectorOffer cassandraConnectorOffer() {
return this.innerModel().connectorOffer();
}
@Override
public boolean keyBasedMetadataWriteAccessDisabled() {
    // Null-safe unboxing: an absent flag on the inner model means "not
    // disabled" rather than a NullPointerException.
    return Boolean.TRUE.equals(this.innerModel().disableKeyBasedMetadataWriteAccess());
}
@Override
public List<Capability> capabilities() {
List<Capability> capabilities = this.innerModel().capabilities();
if (capabilities == null) {
capabilities = new ArrayList<>();
}
return Collections.unmodifiableList(capabilities);
}
@Override
public List<VirtualNetworkRule> virtualNetworkRules() {
List<VirtualNetworkRule> result =
(this.innerModel() != null && this.innerModel().virtualNetworkRules() != null)
? this.innerModel().virtualNetworkRules()
: new ArrayList<VirtualNetworkRule>();
return Collections.unmodifiableList(result);
}
@Override
public void offlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().offlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> offlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.offlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void onlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().onlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> onlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.onlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void regenerateKey(KeyKind keyKind) {
this.manager().serviceClient().getDatabaseAccounts().regenerateKey(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public Mono<Void> regenerateKeyAsync(KeyKind keyKind) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind) {
this.innerModel().withKind(kind);
return this;
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind, Capability... capabilities) {
this.innerModel().withKind(kind);
this.innerModel().withCapabilities(Arrays.asList(capabilities));
return this;
}
@Override
public CosmosDBAccountImpl withDataModelSql() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelMongoDB() {
this.innerModel().withKind(DatabaseAccountKind.MONGO_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelCassandra() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableCassandra"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Cassandra");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelAzureTable() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableTable"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Table");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelGremlin() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableGremlin"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Graph");
return this;
}
@Override
public CosmosDBAccountImpl withIpRangeFilter(String ipRangeFilter) {
List<IpAddressOrRange> rules = new ArrayList<>();
if (!CoreUtils.isNullOrEmpty(ipRangeFilter)) {
for (String ip : ipRangeFilter.split(",")) {
rules.add(new IpAddressOrRange().withIpAddressOrRange(ip));
}
}
this.innerModel().withIpRules(rules);
return this;
}
@Override
public CosmosDBAccountImpl withIpRules(List<IpAddressOrRange> ipRules) {
this.innerModel().withIpRules(ipRules);
return this;
}
@Override
protected Mono<DatabaseAccountGetResultsInner> getInnerAsync() {
return this.manager().serviceClient().getDatabaseAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public CosmosDBAccountImpl withWriteReplication(Region region) {
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withReadReplication(Region region) {
this.ensureFailoverIsInitialized();
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
failoverPolicyInner.withFailoverPriority(this.failoverPolicies.size());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withoutReadReplication(Region region) {
    // Removes every read region (index >= 1; index 0 is the write region)
    // whose normalized location name matches the given region.
    //
    // Fix: the previous index-based loop removed while incrementing i, which
    // skipped the element immediately after each removal. removeIf on the
    // sub-list view writes through to the backing list and removes all matches.
    this.ensureFailoverIsInitialized();
    if (!this.failoverPolicies.isEmpty()) {
        this.failoverPolicies.subList(1, this.failoverPolicies.size())
            .removeIf(policy -> policy.locationName() != null
                && formatLocationName(policy.locationName()).equals(region.name()));
    }
    return this;
}
@Override
public CosmosDBAccountImpl withEventualConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.EVENTUAL, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withSessionConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.SESSION, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withBoundedStalenessConsistency(long maxStalenessPrefix, int maxIntervalInSeconds) {
this.setConsistencyPolicy(DefaultConsistencyLevel.BOUNDED_STALENESS, maxStalenessPrefix, maxIntervalInSeconds);
return this;
}
@Override
public CosmosDBAccountImpl withStrongConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.STRONG, 0, 0);
return this;
}
@Override
public PrivateEndpointConnectionImpl defineNewPrivateEndpointConnection(String name) {
return this.privateEndpointConnections.define(name);
}
@Override
public PrivateEndpointConnectionImpl updatePrivateEndpointConnection(String name) {
return this.privateEndpointConnections.update(name);
}
@Override
public CosmosDBAccountImpl withoutPrivateEndpointConnection(String name) {
this.privateEndpointConnections.remove(name);
return this;
}
CosmosDBAccountImpl withPrivateEndpointConnection(PrivateEndpointConnectionImpl privateEndpointConnection) {
this.privateEndpointConnections.addPrivateEndpointConnection(privateEndpointConnection);
return this;
}
@Override
public Mono<CosmosDBAccount> createResourceAsync() {
return this.doDatabaseUpdateCreate();
}
private DatabaseAccountCreateUpdateParameters createUpdateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountCreateUpdateParameters createUpdateParametersInner = new DatabaseAccountCreateUpdateParameters();
createUpdateParametersInner.withLocation(this.regionName().toLowerCase(Locale.ROOT));
createUpdateParametersInner.withConsistencyPolicy(inner.consistencyPolicy());
createUpdateParametersInner.withIpRules(inner.ipRules());
createUpdateParametersInner.withKind(inner.kind());
createUpdateParametersInner.withCapabilities(inner.capabilities());
createUpdateParametersInner.withTags(inner.tags());
createUpdateParametersInner.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
this
.addLocationsForParameters(
new CreateUpdateLocationParameters(createUpdateParametersInner), this.failoverPolicies);
createUpdateParametersInner.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
createUpdateParametersInner.withEnableCassandraConnector(inner.enableCassandraConnector());
createUpdateParametersInner.withConnectorOffer(inner.connectorOffer());
createUpdateParametersInner.withEnableAutomaticFailover(inner.enableAutomaticFailover());
createUpdateParametersInner.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (this.virtualNetworkRulesMap != null) {
createUpdateParametersInner
.withVirtualNetworkRules(new ArrayList<VirtualNetworkRule>(this.virtualNetworkRulesMap.values()));
this.virtualNetworkRulesMap = null;
}
return createUpdateParametersInner;
}
/**
 * Builds the payload for an update (PATCH) call from the locally accumulated state.
 * Mirrors {@code createUpdateParametersInner}, minus create-only fields such as kind.
 *
 * @param inner the locally accumulated desired state
 * @return the populated update parameters
 */
private DatabaseAccountUpdateParameters updateParametersInner(DatabaseAccountGetResultsInner inner) {
    // Make sure failover policies reflect the current state before they are serialized.
    this.ensureFailoverIsInitialized();
    DatabaseAccountUpdateParameters updateParameters = new DatabaseAccountUpdateParameters();
    updateParameters.withTags(inner.tags());
    updateParameters.withLocation(this.regionName().toLowerCase(Locale.ROOT));
    updateParameters.withConsistencyPolicy(inner.consistencyPolicy());
    updateParameters.withIpRules(inner.ipRules());
    updateParameters.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
    updateParameters.withEnableAutomaticFailover(inner.enableAutomaticFailover());
    updateParameters.withCapabilities(inner.capabilities());
    updateParameters.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
    updateParameters.withEnableCassandraConnector(inner.enableCassandraConnector());
    updateParameters.withConnectorOffer(inner.connectorOffer());
    updateParameters.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
    if (virtualNetworkRulesMap != null) {
        updateParameters.withVirtualNetworkRules(new ArrayList<>(this.virtualNetworkRulesMap.values()));
        // Drop the staged map so later reads re-seed from the refreshed inner model.
        virtualNetworkRulesMap = null;
    }
    updateParameters.withLocations(null);
    this.addLocationsForParameters(new UpdateLocationParameters(updateParameters), this.failoverPolicies);
    return updateParameters;
}
/**
 * Normalizes an account name to the lower-case form the service expects,
 * using a locale-independent mapping.
 *
 * @param name the raw account name
 * @return the normalized name
 */
private static String fixDBName(String name) {
    return name.toLowerCase(Locale.ROOT);
}
/**
 * Installs a consistency policy of the given level on the inner model.
 * The staleness window parameters apply only to bounded staleness and are
 * ignored for every other level.
 */
private void setConsistencyPolicy(
    DefaultConsistencyLevel level, long maxStalenessPrefix, int maxIntervalInSeconds) {
    final ConsistencyPolicy consistencyPolicy = new ConsistencyPolicy();
    consistencyPolicy.withDefaultConsistencyLevel(level);
    if (DefaultConsistencyLevel.BOUNDED_STALENESS == level) {
        // Staleness bounds are only meaningful for BOUNDED_STALENESS.
        consistencyPolicy.withMaxStalenessPrefix(maxStalenessPrefix);
        consistencyPolicy.withMaxIntervalInSeconds(maxIntervalInSeconds);
    }
    innerModel().withConsistencyPolicy(consistencyPolicy);
}
/**
 * Populates the payload's location list: one entry per failover policy, with the
 * failover priority taken from list order; when no policies are staged, a single
 * priority-0 entry at the payload's own region is used.
 */
private void addLocationsForParameters(HasLocations locationParameters, List<FailoverPolicy> failoverPolicies) {
    final List<Location> locations = new ArrayList<>();
    if (failoverPolicies.isEmpty()) {
        // No explicit policies: default to the payload's own region at priority 0.
        final Location primary = new Location();
        primary.withFailoverPriority(0);
        primary.withLocationName(locationParameters.location());
        locations.add(primary);
    } else {
        int priority = 0;
        for (FailoverPolicy policy : failoverPolicies) {
            final Location entry = new Location();
            entry.withFailoverPriority(priority++);
            entry.withLocationName(policy.locationName());
            locations.add(entry);
        }
    }
    locationParameters.withLocations(locations);
}
/**
 * Canonicalizes a region display name (e.g. {@code "West US"}) for comparison:
 * spaces removed and lower-cased with a locale-independent mapping.
 */
private static String formatLocationName(String locationName) {
    return locationName.replace(" ", "").toLowerCase(Locale.ROOT);
}
/**
 * Issues the create (PUT) or update (PATCH) call, then polls the account until it
 * reaches a terminal provisioning state with every requested replication location
 * present, adopting the refreshed server-side state before completing.
 *
 * @return a Mono emitting the account once provisioning has settled
 */
private Mono<CosmosDBAccount> doDatabaseUpdateCreate() {
    final CosmosDBAccountImpl self = this;
    // Single mutable cell tracking an elapsed-time counter inside the reactive chain.
    final List<Integer> data = new ArrayList<Integer>();
    data.add(0);
    Mono<DatabaseAccountGetResultsInner> request = null;
    HasLocations locationParameters = null;
    if (isInCreateMode()) {
        final DatabaseAccountCreateUpdateParameters createUpdateParametersInner =
            this.createUpdateParametersInner(this.innerModel());
        request =
            this
                .manager()
                .serviceClient()
                .getDatabaseAccounts()
                .createOrUpdateAsync(resourceGroupName(), name(), createUpdateParametersInner);
        locationParameters = new CreateUpdateLocationParameters(createUpdateParametersInner);
    } else {
        final DatabaseAccountUpdateParameters updateParametersInner = this.updateParametersInner(this.innerModel());
        request =
            this
                .manager()
                .serviceClient()
                .getDatabaseAccounts()
                .updateAsync(resourceGroupName(), name(), updateParametersInner);
        locationParameters = new UpdateLocationParameters(updateParametersInner);
    }
    // Canonicalized names of every requested location; polling waits until they all appear.
    Set<String> locations = locationParameters.locations().stream()
        .map(location -> formatLocationName(location.locationName()))
        .collect(Collectors.toSet());
    return request
        .flatMap(
            databaseAccountInner -> {
                // The service accepted the payload; the staged failover edits are now in flight.
                self.failoverPolicies.clear();
                self.hasFailoverPolicyChanges = false;
                return manager()
                    .databaseAccounts()
                    .getByResourceGroupAsync(resourceGroupName(), name())
                    .flatMap(
                        databaseAccount -> {
                            // Mono.empty() here means "not ready yet" and is retried via
                            // repeatWhenEmpty below. Keep waiting while the id or the
                            // failover-policy count hasn't caught up, until the grace
                            // period MAX_DELAY_DUE_TO_MISSING_FAILOVERS lapses.
                            if (MAX_DELAY_DUE_TO_MISSING_FAILOVERS > data.get(0)
                                && (databaseAccount.id() == null
                                    || databaseAccount.id().length() == 0
                                    || locations.size()
                                        != databaseAccount.innerModel().failoverPolicies().size())) {
                                return Mono.empty();
                            }
                            if (isAFinalProvisioningState(databaseAccount.innerModel().provisioningState())) {
                                // Every readable replica must itself be in a terminal state
                                // and be one of the locations we asked for.
                                for (Location location : databaseAccount.readableReplications()) {
                                    if (!isAFinalProvisioningState(location.provisioningState())) {
                                        return Mono.empty();
                                    }
                                    if (!locations.contains(formatLocationName(location.locationName()))) {
                                        return Mono.empty();
                                    }
                                }
                            } else {
                                return Mono.empty();
                            }
                            // Provisioning settled: adopt the fresh server-side state.
                            self.setInner(databaseAccount.innerModel());
                            return Mono.just(databaseAccount);
                        })
                    .repeatWhenEmpty(
                        longFlux ->
                            longFlux
                                .flatMap(
                                    index -> {
                                        // NOTE(review): the counter advances by a fixed 30 per retry
                                        // while the actual sleep is the client's default poll
                                        // interval — confirm the two are meant to correspond.
                                        data.set(0, data.get(0) + 30);
                                        return Mono.delay(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(
                                            manager().serviceClient().getDefaultPollInterval()));
                                    }));
            });
}
/**
 * Lazily seeds {@code failoverPolicies} from the inner model, sorted by failover
 * priority, the first time failover settings are touched during an update.
 * No-op in create mode or when local edits already exist.
 */
private void ensureFailoverIsInitialized() {
    if (isInCreateMode() || hasFailoverPolicyChanges) {
        return;
    }
    failoverPolicies.clear();
    final List<FailoverPolicy> sorted = new ArrayList<>(innerModel().failoverPolicies());
    sorted.sort(Comparator.comparing(FailoverPolicy::failoverPriority));
    failoverPolicies.addAll(sorted);
    hasFailoverPolicyChanges = true;
}
/**
 * Whether a provisioning-state string denotes a terminal state
 * ({@code succeeded}, {@code canceled} or {@code failed}), case-insensitively.
 */
private boolean isAFinalProvisioningState(String state) {
    final String normalized = state.toLowerCase(Locale.ROOT);
    return "succeeded".equals(normalized)
        || "canceled".equals(normalized)
        || "failed".equals(normalized);
}
/**
 * Lazily builds the id-keyed map of virtual network rules, seeding it from the
 * inner model on first use.
 *
 * @return the (mutable) staged rule map
 */
private Map<String, VirtualNetworkRule> ensureVirtualNetworkRules() {
    if (virtualNetworkRulesMap == null) {
        virtualNetworkRulesMap = new HashMap<>();
        if (innerModel() != null && innerModel().virtualNetworkRules() != null) {
            for (VirtualNetworkRule rule : innerModel().virtualNetworkRules()) {
                virtualNetworkRulesMap.put(rule.id(), rule);
            }
        }
    }
    return virtualNetworkRulesMap;
}
/**
 * Adds a virtual-network rule for the given subnet and turns the VNET filter on.
 *
 * @param virtualNetworkId resource id of the virtual network
 * @param subnetName name of the subnet within that network
 * @return this account object, for chaining
 */
@Override
public CosmosDBAccountImpl withVirtualNetwork(String virtualNetworkId, String subnetName) {
    innerModel().withIsVirtualNetworkFilterEnabled(true);
    final String subnetId = virtualNetworkId + "/subnets/" + subnetName;
    ensureVirtualNetworkRules().put(subnetId, new VirtualNetworkRule().withId(subnetId));
    return this;
}
/**
 * Removes the rule for the given subnet; when no rules remain, the VNET filter is
 * switched off as well.
 *
 * @param virtualNetworkId resource id of the virtual network
 * @param subnetName name of the subnet within that network
 * @return this account object, for chaining
 */
@Override
public CosmosDBAccountImpl withoutVirtualNetwork(String virtualNetworkId, String subnetName) {
    final Map<String, VirtualNetworkRule> rules = ensureVirtualNetworkRules();
    rules.remove(virtualNetworkId + "/subnets/" + subnetName);
    if (rules.isEmpty()) {
        innerModel().withIsVirtualNetworkFilterEnabled(false);
    }
    return this;
}
/**
 * Merges the given rules into the staged set, keyed by rule id. A {@code null} or
 * empty list clears all rules and disables the VNET filter.
 *
 * @param virtualNetworkRules the rules to apply, or null/empty to clear
 * @return this account object, for chaining
 */
@Override
public CosmosDBAccountImpl withVirtualNetworkRules(List<VirtualNetworkRule> virtualNetworkRules) {
    final Map<String, VirtualNetworkRule> rules = ensureVirtualNetworkRules();
    if (virtualNetworkRules == null || virtualNetworkRules.isEmpty()) {
        rules.clear();
        innerModel().withIsVirtualNetworkFilterEnabled(false);
        return this;
    }
    innerModel().withIsVirtualNetworkFilterEnabled(true);
    for (VirtualNetworkRule rule : virtualNetworkRules) {
        rules.put(rule.id(), rule);
    }
    return this;
}
/** Enables or disables multi-region writes on the account. */
@Override
public CosmosDBAccountImpl withMultipleWriteLocationsEnabled(boolean enabled) {
    innerModel().withEnableMultipleWriteLocations(enabled);
    return this;
}
/** Turns the Cassandra connector on with the given connector offer. */
@Override
public CosmosDBAccountImpl withCassandraConnector(ConnectorOffer connectorOffer) {
    innerModel().withEnableCassandraConnector(true);
    innerModel().withConnectorOffer(connectorOffer);
    return this;
}
/** Turns the Cassandra connector off and clears the connector offer. */
@Override
public CosmosDBAccountImpl withoutCassandraConnector() {
    innerModel().withEnableCassandraConnector(false);
    innerModel().withConnectorOffer(null);
    return this;
}
/** Controls whether clients authenticating with account keys may write metadata. */
@Override
public CosmosDBAccountImpl withDisableKeyBaseMetadataWriteAccess(boolean disabled) {
    innerModel().withDisableKeyBasedMetadataWriteAccess(disabled);
    return this;
}
/**
 * Abstraction over the create and update payload types so location handling can be
 * shared between both flows (see {@code addLocationsForParameters}).
 */
interface HasLocations {
    // Region of the payload itself; used when no failover policies are staged.
    String location();
    // Location entries currently set on the payload.
    List<Location> locations();
    // Replaces the payload's location entries.
    void withLocations(List<Location> locations);
}
/** Adapts {@code DatabaseAccountCreateUpdateParameters} to {@code HasLocations}. */
static class CreateUpdateLocationParameters implements HasLocations {
    private final DatabaseAccountCreateUpdateParameters parameters;

    CreateUpdateLocationParameters(DatabaseAccountCreateUpdateParameters parametersObject) {
        this.parameters = parametersObject;
    }

    @Override
    public String location() {
        return this.parameters.location();
    }

    @Override
    public List<Location> locations() {
        return this.parameters.locations();
    }

    @Override
    public void withLocations(List<Location> locations) {
        this.parameters.withLocations(locations);
    }
}
/** Adapts {@code DatabaseAccountUpdateParameters} to {@code HasLocations}. */
static class UpdateLocationParameters implements HasLocations {
    private final DatabaseAccountUpdateParameters parameters;

    UpdateLocationParameters(DatabaseAccountUpdateParameters parametersObject) {
        this.parameters = parametersObject;
    }

    @Override
    public String location() {
        return this.parameters.location();
    }

    @Override
    public List<Location> locations() {
        return this.parameters.locations();
    }

    @Override
    public void withLocations(List<Location> locations) {
        this.parameters.withLocations(locations);
    }
}
} |
public List<IpAddressOrRange> ipRules() {
    return Collections.unmodifiableList(this.innerModel().ipRules());
}
class CosmosDBAccountImpl
extends GroupableResourceImpl<CosmosDBAccount, DatabaseAccountGetResultsInner, CosmosDBAccountImpl, CosmosManager>
implements CosmosDBAccount, CosmosDBAccount.Definition, CosmosDBAccount.Update {
private List<FailoverPolicy> failoverPolicies;
private boolean hasFailoverPolicyChanges;
private static final int MAX_DELAY_DUE_TO_MISSING_FAILOVERS = 60 * 10;
private Map<String, VirtualNetworkRule> virtualNetworkRulesMap;
private PrivateEndpointConnectionsImpl privateEndpointConnections;
CosmosDBAccountImpl(String name, DatabaseAccountGetResultsInner innerObject, CosmosManager manager) {
super(fixDBName(name), innerObject, manager);
this.failoverPolicies = new ArrayList<>();
this.privateEndpointConnections =
new PrivateEndpointConnectionsImpl(this.manager().serviceClient().getPrivateEndpointConnections(), this);
}
@Override
public DatabaseAccountKind kind() {
return this.innerModel().kind();
}
@Override
public String documentEndpoint() {
return this.innerModel().documentEndpoint();
}
@Override
public DatabaseAccountOfferType databaseAccountOfferType() {
return this.innerModel().databaseAccountOfferType();
}
@Override
public String ipRangeFilter() {
return this.ipRules().stream().map(IpAddressOrRange::ipAddressOrRange).collect(Collectors.joining(","));
}
/** The consistency policy currently set on the account's inner model. */
// Fix: the annotation was duplicated ("@Override" twice); @Override is not a
// repeatable annotation, so the duplicate is a compile error.
@Override
public ConsistencyPolicy consistencyPolicy() {
    return this.innerModel().consistencyPolicy();
}
/**
 * The default consistency level configured on the account.
 *
 * @throws IllegalStateException if the service returned no consistency policy
 */
@Override
public DefaultConsistencyLevel defaultConsistencyLevel() {
    if (this.innerModel().consistencyPolicy() == null) {
        // IllegalStateException is more specific than a bare RuntimeException and,
        // being a RuntimeException subclass, remains compatible with existing catchers.
        throw new IllegalStateException("Consistency policy is missing!");
    }
    return this.innerModel().consistencyPolicy().defaultConsistencyLevel();
}
/**
 * The write locations of the account as reported by the service.
 *
 * @return a read-only view (never null), consistent with {@code capabilities()}
 *     and {@code virtualNetworkRules()}, so callers cannot mutate internal state
 */
@Override
public List<Location> writableReplications() {
    List<Location> locations = this.innerModel().writeLocations();
    if (locations == null) {
        locations = new ArrayList<>();
    }
    return Collections.unmodifiableList(locations);
}
/**
 * The read locations of the account as reported by the service.
 *
 * @return a read-only view (never null), consistent with {@code capabilities()}
 *     and {@code virtualNetworkRules()}, so callers cannot mutate internal state
 */
@Override
public List<Location> readableReplications() {
    List<Location> locations = this.innerModel().readLocations();
    if (locations == null) {
        locations = new ArrayList<>();
    }
    return Collections.unmodifiableList(locations);
}
@Override
public DatabaseAccountListKeysResult listKeys() {
return this.listKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListKeysResult> listKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListKeysResultImpl::new);
}
@Override
public DatabaseAccountListReadOnlyKeysResult listReadOnlyKeys() {
return this.listReadOnlyKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListReadOnlyKeysResult> listReadOnlyKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listReadOnlyKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListReadOnlyKeysResultImpl::new);
}
@Override
public DatabaseAccountListConnectionStringsResult listConnectionStrings() {
return this.listConnectionStringsAsync().block();
}
@Override
public Mono<DatabaseAccountListConnectionStringsResult> listConnectionStringsAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listConnectionStringsAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListConnectionStringsResultImpl::new);
}
@Override
public List<SqlDatabase> listSqlDatabases() {
return this.listSqlDatabasesAsync().collectList().block();
}
@Override
public PagedFlux<SqlDatabase> listSqlDatabasesAsync() {
return this
.manager()
.serviceClient()
.getSqlResources()
.listSqlDatabasesAsync(this.resourceGroupName(), this.name())
.mapPage(SqlDatabaseImpl::new);
}
@Override
public List<PrivateLinkResource> listPrivateLinkResources() {
return this.listPrivateLinkResourcesAsync().collectList().block();
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.listByDatabaseAccountAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public PrivateLinkResource getPrivateLinkResource(String groupName) {
return this.getPrivateLinkResourceAsync(groupName).block();
}
@Override
public Mono<PrivateLinkResource> getPrivateLinkResourceAsync(String groupName) {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.getAsync(this.resourceGroupName(), this.name(), groupName)
.map(PrivateLinkResourceImpl::new);
}
@Override
public Map<String, PrivateEndpointConnection> listPrivateEndpointConnection() {
return this.listPrivateEndpointConnectionAsync().block();
}
@Override
public Mono<Map<String, PrivateEndpointConnection>> listPrivateEndpointConnectionAsync() {
return this.privateEndpointConnections.asMapAsync();
}
@Override
public PrivateEndpointConnection getPrivateEndpointConnection(String name) {
return this.getPrivateEndpointConnectionAsync(name).block();
}
@Override
public Mono<PrivateEndpointConnection> getPrivateEndpointConnectionAsync(String name) {
return this
.privateEndpointConnections
.getImplAsync(name)
.map(privateEndpointConnection -> privateEndpointConnection);
}
@Override
public boolean multipleWriteLocationsEnabled() {
return this.innerModel().enableMultipleWriteLocations();
}
@Override
public boolean cassandraConnectorEnabled() {
return this.innerModel().enableCassandraConnector();
}
@Override
public ConnectorOffer cassandraConnectorOffer() {
return this.innerModel().connectorOffer();
}
@Override
public boolean keyBasedMetadataWriteAccessDisabled() {
return this.innerModel().disableKeyBasedMetadataWriteAccess();
}
@Override
public List<Capability> capabilities() {
List<Capability> capabilities = this.innerModel().capabilities();
if (capabilities == null) {
capabilities = new ArrayList<>();
}
return Collections.unmodifiableList(capabilities);
}
@Override
public List<VirtualNetworkRule> virtualNetworkRules() {
List<VirtualNetworkRule> result =
(this.innerModel() != null && this.innerModel().virtualNetworkRules() != null)
? this.innerModel().virtualNetworkRules()
: new ArrayList<VirtualNetworkRule>();
return Collections.unmodifiableList(result);
}
@Override
public void offlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().offlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> offlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.offlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void onlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().onlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> onlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.onlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void regenerateKey(KeyKind keyKind) {
this.manager().serviceClient().getDatabaseAccounts().regenerateKey(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public Mono<Void> regenerateKeyAsync(KeyKind keyKind) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind) {
this.innerModel().withKind(kind);
return this;
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind, Capability... capabilities) {
this.innerModel().withKind(kind);
this.innerModel().withCapabilities(Arrays.asList(capabilities));
return this;
}
@Override
public CosmosDBAccountImpl withDataModelSql() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelMongoDB() {
this.innerModel().withKind(DatabaseAccountKind.MONGO_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelCassandra() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableCassandra"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Cassandra");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelAzureTable() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableTable"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Table");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelGremlin() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableGremlin"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Graph");
return this;
}
@Override
public CosmosDBAccountImpl withIpRangeFilter(String ipRangeFilter) {
List<IpAddressOrRange> rules = new ArrayList<>();
for (String ip : ipRangeFilter.split(",")) {
rules.add(new IpAddressOrRange().withIpAddressOrRange(ip));
}
this.innerModel().withIpRules(rules);
return this;
}
@Override
public CosmosDBAccountImpl withIpRules(List<IpAddressOrRange> ipRules) {
this.innerModel().withIpRules(ipRules);
return this;
}
@Override
protected Mono<DatabaseAccountGetResultsInner> getInnerAsync() {
return this.manager().serviceClient().getDatabaseAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public CosmosDBAccountImpl withWriteReplication(Region region) {
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withReadReplication(Region region) {
this.ensureFailoverIsInitialized();
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
failoverPolicyInner.withFailoverPriority(this.failoverPolicies.size());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withoutReadReplication(Region region) {
this.ensureFailoverIsInitialized();
for (int i = 1; i < this.failoverPolicies.size(); i++) {
if (this.failoverPolicies.get(i).locationName() != null) {
String locName = formatLocationName(this.failoverPolicies.get(i).locationName());
if (locName.equals(region.name())) {
this.failoverPolicies.remove(i);
}
}
}
return this;
}
@Override
public CosmosDBAccountImpl withEventualConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.EVENTUAL, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withSessionConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.SESSION, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withBoundedStalenessConsistency(long maxStalenessPrefix, int maxIntervalInSeconds) {
this.setConsistencyPolicy(DefaultConsistencyLevel.BOUNDED_STALENESS, maxStalenessPrefix, maxIntervalInSeconds);
return this;
}
@Override
public CosmosDBAccountImpl withStrongConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.STRONG, 0, 0);
return this;
}
@Override
public PrivateEndpointConnectionImpl defineNewPrivateEndpointConnection(String name) {
return this.privateEndpointConnections.define(name);
}
@Override
public PrivateEndpointConnectionImpl updatePrivateEndpointConnection(String name) {
return this.privateEndpointConnections.update(name);
}
@Override
public CosmosDBAccountImpl withoutPrivateEndpointConnection(String name) {
this.privateEndpointConnections.remove(name);
return this;
}
CosmosDBAccountImpl withPrivateEndpointConnection(PrivateEndpointConnectionImpl privateEndpointConnection) {
this.privateEndpointConnections.addPrivateEndpointConnection(privateEndpointConnection);
return this;
}
@Override
public Mono<CosmosDBAccount> createResourceAsync() {
return this.doDatabaseUpdateCreate();
}
private DatabaseAccountCreateUpdateParameters createUpdateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountCreateUpdateParameters createUpdateParametersInner = new DatabaseAccountCreateUpdateParameters();
createUpdateParametersInner.withLocation(this.regionName().toLowerCase(Locale.ROOT));
createUpdateParametersInner.withConsistencyPolicy(inner.consistencyPolicy());
createUpdateParametersInner.withIpRules(inner.ipRules());
createUpdateParametersInner.withKind(inner.kind());
createUpdateParametersInner.withCapabilities(inner.capabilities());
createUpdateParametersInner.withTags(inner.tags());
createUpdateParametersInner.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
this
.addLocationsForParameters(
new CreateUpdateLocationParameters(createUpdateParametersInner), this.failoverPolicies);
createUpdateParametersInner.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
createUpdateParametersInner.withEnableCassandraConnector(inner.enableCassandraConnector());
createUpdateParametersInner.withConnectorOffer(inner.connectorOffer());
createUpdateParametersInner.withEnableAutomaticFailover(inner.enableAutomaticFailover());
createUpdateParametersInner.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (this.virtualNetworkRulesMap != null) {
createUpdateParametersInner
.withVirtualNetworkRules(new ArrayList<VirtualNetworkRule>(this.virtualNetworkRulesMap.values()));
this.virtualNetworkRulesMap = null;
}
return createUpdateParametersInner;
}
private DatabaseAccountUpdateParameters updateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountUpdateParameters updateParameters = new DatabaseAccountUpdateParameters();
updateParameters.withTags(inner.tags());
updateParameters.withLocation(this.regionName().toLowerCase(Locale.ROOT));
updateParameters.withConsistencyPolicy(inner.consistencyPolicy());
updateParameters.withIpRules(inner.ipRules());
updateParameters.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
updateParameters.withEnableAutomaticFailover(inner.enableAutomaticFailover());
updateParameters.withCapabilities(inner.capabilities());
updateParameters.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
updateParameters.withEnableCassandraConnector(inner.enableCassandraConnector());
updateParameters.withConnectorOffer(inner.connectorOffer());
updateParameters.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (virtualNetworkRulesMap != null) {
updateParameters.withVirtualNetworkRules(new ArrayList<>(this.virtualNetworkRulesMap.values()));
virtualNetworkRulesMap = null;
}
this.addLocationsForParameters(new UpdateLocationParameters(updateParameters), this.failoverPolicies);
return updateParameters;
}
private static String fixDBName(String name) {
return name.toLowerCase(Locale.ROOT);
}
private void setConsistencyPolicy(
DefaultConsistencyLevel level, long maxStalenessPrefix, int maxIntervalInSeconds) {
ConsistencyPolicy policy = new ConsistencyPolicy();
policy.withDefaultConsistencyLevel(level);
if (level == DefaultConsistencyLevel.BOUNDED_STALENESS) {
policy.withMaxStalenessPrefix(maxStalenessPrefix);
policy.withMaxIntervalInSeconds(maxIntervalInSeconds);
}
this.innerModel().withConsistencyPolicy(policy);
}
private void addLocationsForParameters(HasLocations locationParameters, List<FailoverPolicy> failoverPolicies) {
List<Location> locations = new ArrayList<Location>();
if (failoverPolicies.size() > 0) {
for (int i = 0; i < failoverPolicies.size(); i++) {
FailoverPolicy policyInner = failoverPolicies.get(i);
Location location = new Location();
location.withFailoverPriority(i);
location.withLocationName(policyInner.locationName());
locations.add(location);
}
} else {
Location location = new Location();
location.withFailoverPriority(0);
location.withLocationName(locationParameters.location());
locations.add(location);
}
locationParameters.withLocations(locations);
}
private static String formatLocationName(String locationName) {
return locationName.replace(" ", "").toLowerCase(Locale.ROOT);
}
private Mono<CosmosDBAccount> doDatabaseUpdateCreate() {
final CosmosDBAccountImpl self = this;
final List<Integer> data = new ArrayList<Integer>();
data.add(0);
Mono<DatabaseAccountGetResultsInner> request = null;
HasLocations locationParameters = null;
if (isInCreateMode()) {
final DatabaseAccountCreateUpdateParameters createUpdateParametersInner =
this.createUpdateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.createOrUpdateAsync(resourceGroupName(), name(), createUpdateParametersInner);
locationParameters = new CreateUpdateLocationParameters(createUpdateParametersInner);
} else {
final DatabaseAccountUpdateParameters updateParametersInner = this.updateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.updateAsync(resourceGroupName(), name(), updateParametersInner);
locationParameters = new UpdateLocationParameters(updateParametersInner);
}
Set<String> locations = locationParameters.locations().stream()
.map(location -> formatLocationName(location.locationName()))
.collect(Collectors.toSet());
return request
.flatMap(
databaseAccountInner -> {
self.failoverPolicies.clear();
self.hasFailoverPolicyChanges = false;
return manager()
.databaseAccounts()
.getByResourceGroupAsync(resourceGroupName(), name())
.flatMap(
databaseAccount -> {
if (MAX_DELAY_DUE_TO_MISSING_FAILOVERS > data.get(0)
&& (databaseAccount.id() == null
|| databaseAccount.id().length() == 0
|| locations.size()
!= databaseAccount.innerModel().failoverPolicies().size())) {
return Mono.empty();
}
if (isAFinalProvisioningState(databaseAccount.innerModel().provisioningState())) {
for (Location location : databaseAccount.readableReplications()) {
if (!isAFinalProvisioningState(location.provisioningState())) {
return Mono.empty();
}
if (!locations.contains(formatLocationName(location.locationName()))) {
return Mono.empty();
}
}
} else {
return Mono.empty();
}
self.setInner(databaseAccount.innerModel());
return Mono.just(databaseAccount);
})
.repeatWhenEmpty(
longFlux ->
longFlux
.flatMap(
index -> {
data.set(0, data.get(0) + 30);
return Mono.delay(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(
manager().serviceClient().getDefaultPollInterval()));
}));
});
}
private void ensureFailoverIsInitialized() {
if (this.isInCreateMode()) {
return;
}
if (!this.hasFailoverPolicyChanges) {
this.failoverPolicies.clear();
FailoverPolicy[] policyInners = new FailoverPolicy[this.innerModel().failoverPolicies().size()];
this.innerModel().failoverPolicies().toArray(policyInners);
Arrays
.sort(
policyInners,
Comparator.comparing(FailoverPolicy::failoverPriority));
for (int i = 0; i < policyInners.length; i++) {
this.failoverPolicies.add(policyInners[i]);
}
this.hasFailoverPolicyChanges = true;
}
}
private boolean isAFinalProvisioningState(String state) {
switch (state.toLowerCase(Locale.ROOT)) {
case "succeeded":
case "canceled":
case "failed":
return true;
default:
return false;
}
}
private Map<String, VirtualNetworkRule> ensureVirtualNetworkRules() {
if (this.virtualNetworkRulesMap == null) {
this.virtualNetworkRulesMap = new HashMap<>();
if (this.innerModel() != null && this.innerModel().virtualNetworkRules() != null) {
for (VirtualNetworkRule virtualNetworkRule : this.innerModel().virtualNetworkRules()) {
this.virtualNetworkRulesMap.put(virtualNetworkRule.id(), virtualNetworkRule);
}
}
}
return this.virtualNetworkRulesMap;
}
@Override
public CosmosDBAccountImpl withVirtualNetwork(String virtualNetworkId, String subnetName) {
this.innerModel().withIsVirtualNetworkFilterEnabled(true);
String vnetId = virtualNetworkId + "/subnets/" + subnetName;
ensureVirtualNetworkRules().put(vnetId, new VirtualNetworkRule().withId(vnetId));
return this;
}
@Override
public CosmosDBAccountImpl withoutVirtualNetwork(String virtualNetworkId, String subnetName) {
Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
vnetRules.remove(virtualNetworkId + "/subnets/" + subnetName);
if (vnetRules.size() == 0) {
this.innerModel().withIsVirtualNetworkFilterEnabled(false);
}
return this;
}
@Override
public CosmosDBAccountImpl withVirtualNetworkRules(List<VirtualNetworkRule> virtualNetworkRules) {
Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
if (virtualNetworkRules == null || virtualNetworkRules.isEmpty()) {
vnetRules.clear();
this.innerModel().withIsVirtualNetworkFilterEnabled(false);
return this;
}
this.innerModel().withIsVirtualNetworkFilterEnabled(true);
for (VirtualNetworkRule vnetRule : virtualNetworkRules) {
this.virtualNetworkRulesMap.put(vnetRule.id(), vnetRule);
}
return this;
}
@Override
public CosmosDBAccountImpl withMultipleWriteLocationsEnabled(boolean enabled) {
this.innerModel().withEnableMultipleWriteLocations(enabled);
return this;
}
@Override
public CosmosDBAccountImpl withCassandraConnector(ConnectorOffer connectorOffer) {
this.innerModel().withEnableCassandraConnector(true);
this.innerModel().withConnectorOffer(connectorOffer);
return this;
}
@Override
public CosmosDBAccountImpl withoutCassandraConnector() {
this.innerModel().withEnableCassandraConnector(false);
this.innerModel().withConnectorOffer(null);
return this;
}
@Override
public CosmosDBAccountImpl withDisableKeyBaseMetadataWriteAccess(boolean disabled) {
this.innerModel().withDisableKeyBasedMetadataWriteAccess(disabled);
return this;
}
interface HasLocations {
String location();
List<Location> locations();
void withLocations(List<Location> locations);
}
static class CreateUpdateLocationParameters implements HasLocations {
private DatabaseAccountCreateUpdateParameters parameters;
CreateUpdateLocationParameters(DatabaseAccountCreateUpdateParameters parametersObject) {
parameters = parametersObject;
}
@Override
public String location() {
return parameters.location();
}
@Override
public List<Location> locations() {
return parameters.locations();
}
@Override
public void withLocations(List<Location> locations) {
parameters.withLocations(locations);
}
}
// Adapts DatabaseAccountUpdateParameters to the HasLocations abstraction so
// location handling is shared with the create path.
static class UpdateLocationParameters implements HasLocations {
    // Wrapped payload; never reassigned after construction, so made final.
    private final DatabaseAccountUpdateParameters parameters;

    UpdateLocationParameters(DatabaseAccountUpdateParameters parametersObject) {
        parameters = parametersObject;
    }

    @Override
    public String location() {
        return parameters.location();
    }

    @Override
    public List<Location> locations() {
        return parameters.locations();
    }

    @Override
    public void withLocations(List<Location> locations) {
        parameters.withLocations(locations);
    }
}
} | class CosmosDBAccountImpl
extends GroupableResourceImpl<CosmosDBAccount, DatabaseAccountGetResultsInner, CosmosDBAccountImpl, CosmosManager>
implements CosmosDBAccount, CosmosDBAccount.Definition, CosmosDBAccount.Update {
private List<FailoverPolicy> failoverPolicies;
private boolean hasFailoverPolicyChanges;
private static final int MAX_DELAY_DUE_TO_MISSING_FAILOVERS = 60 * 10;
private Map<String, VirtualNetworkRule> virtualNetworkRulesMap;
private PrivateEndpointConnectionsImpl privateEndpointConnections;
CosmosDBAccountImpl(String name, DatabaseAccountGetResultsInner innerObject, CosmosManager manager) {
super(fixDBName(name), innerObject, manager);
this.failoverPolicies = new ArrayList<>();
this.privateEndpointConnections =
new PrivateEndpointConnectionsImpl(this.manager().serviceClient().getPrivateEndpointConnections(), this);
}
@Override
public DatabaseAccountKind kind() {
return this.innerModel().kind();
}
@Override
public String documentEndpoint() {
return this.innerModel().documentEndpoint();
}
@Override
public DatabaseAccountOfferType databaseAccountOfferType() {
return this.innerModel().databaseAccountOfferType();
}
@Override
public String ipRangeFilter() {
if (CoreUtils.isNullOrEmpty(ipRules())) {
return null;
}
return this.ipRules().stream().map(IpAddressOrRange::ipAddressOrRange).collect(Collectors.joining(","));
}
@Override
// Fixed: the annotation appeared twice; @Override is not @Repeatable, so a
// duplicated annotation on the same method is a compile-time error.
public ConsistencyPolicy consistencyPolicy() {
    // Consistency policy as reported by the service for this account.
    return this.innerModel().consistencyPolicy();
}
@Override
public DefaultConsistencyLevel defaultConsistencyLevel() {
    // Hoist the repeated innerModel().consistencyPolicy() call and throw the
    // standard unchecked type for "object in wrong state" instead of a raw
    // RuntimeException. IllegalStateException IS-A RuntimeException, so any
    // caller catching the old type still works.
    ConsistencyPolicy policy = this.innerModel().consistencyPolicy();
    if (policy == null) {
        throw new IllegalStateException("Consistency policy is missing!");
    }
    return policy.defaultConsistencyLevel();
}
@Override
public List<Location> writableReplications() {
return this.innerModel().writeLocations();
}
@Override
public List<Location> readableReplications() {
return this.innerModel().readLocations();
}
@Override
public DatabaseAccountListKeysResult listKeys() {
return this.listKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListKeysResult> listKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListKeysResultImpl::new);
}
@Override
public DatabaseAccountListReadOnlyKeysResult listReadOnlyKeys() {
return this.listReadOnlyKeysAsync().block();
}
@Override
public Mono<DatabaseAccountListReadOnlyKeysResult> listReadOnlyKeysAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listReadOnlyKeysAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListReadOnlyKeysResultImpl::new);
}
@Override
public DatabaseAccountListConnectionStringsResult listConnectionStrings() {
return this.listConnectionStringsAsync().block();
}
@Override
public Mono<DatabaseAccountListConnectionStringsResult> listConnectionStringsAsync() {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.listConnectionStringsAsync(this.resourceGroupName(), this.name())
.map(
DatabaseAccountListConnectionStringsResultImpl::new);
}
@Override
public List<SqlDatabase> listSqlDatabases() {
return this.listSqlDatabasesAsync().collectList().block();
}
@Override
public PagedFlux<SqlDatabase> listSqlDatabasesAsync() {
return this
.manager()
.serviceClient()
.getSqlResources()
.listSqlDatabasesAsync(this.resourceGroupName(), this.name())
.mapPage(SqlDatabaseImpl::new);
}
@Override
public List<PrivateLinkResource> listPrivateLinkResources() {
return this.listPrivateLinkResourcesAsync().collectList().block();
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.listByDatabaseAccountAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public PrivateLinkResource getPrivateLinkResource(String groupName) {
return this.getPrivateLinkResourceAsync(groupName).block();
}
@Override
public Mono<PrivateLinkResource> getPrivateLinkResourceAsync(String groupName) {
return this
.manager()
.serviceClient()
.getPrivateLinkResources()
.getAsync(this.resourceGroupName(), this.name(), groupName)
.map(PrivateLinkResourceImpl::new);
}
@Override
public Map<String, PrivateEndpointConnection> listPrivateEndpointConnection() {
return this.listPrivateEndpointConnectionAsync().block();
}
@Override
public Mono<Map<String, PrivateEndpointConnection>> listPrivateEndpointConnectionAsync() {
return this.privateEndpointConnections.asMapAsync();
}
@Override
public PrivateEndpointConnection getPrivateEndpointConnection(String name) {
return this.getPrivateEndpointConnectionAsync(name).block();
}
@Override
public Mono<PrivateEndpointConnection> getPrivateEndpointConnectionAsync(String name) {
return this
.privateEndpointConnections
.getImplAsync(name)
.map(privateEndpointConnection -> privateEndpointConnection);
}
@Override
public boolean multipleWriteLocationsEnabled() {
return this.innerModel().enableMultipleWriteLocations();
}
@Override
public boolean cassandraConnectorEnabled() {
return this.innerModel().enableCassandraConnector();
}
@Override
public ConnectorOffer cassandraConnectorOffer() {
return this.innerModel().connectorOffer();
}
@Override
public boolean keyBasedMetadataWriteAccessDisabled() {
return this.innerModel().disableKeyBasedMetadataWriteAccess();
}
@Override
public List<Capability> capabilities() {
    // Read-only view of the account capabilities; empty list when unset.
    List<Capability> raw = this.innerModel().capabilities();
    return Collections.unmodifiableList(raw != null ? raw : new ArrayList<>());
}
@Override
public List<VirtualNetworkRule> virtualNetworkRules() {
    // Read-only view of the configured VNET rules; empty list when none exist.
    List<VirtualNetworkRule> rules = new ArrayList<>();
    if (this.innerModel() != null && this.innerModel().virtualNetworkRules() != null) {
        rules = this.innerModel().virtualNetworkRules();
    }
    return Collections.unmodifiableList(rules);
}
@Override
public void offlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().offlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> offlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.offlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void onlineRegion(Region region) {
this.manager().serviceClient().getDatabaseAccounts().onlineRegion(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public Mono<Void> onlineRegionAsync(Region region) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.onlineRegionAsync(this.resourceGroupName(), this.name(),
new RegionForOnlineOffline().withRegion(region.label()));
}
@Override
public void regenerateKey(KeyKind keyKind) {
this.manager().serviceClient().getDatabaseAccounts().regenerateKey(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public Mono<Void> regenerateKeyAsync(KeyKind keyKind) {
return this
.manager()
.serviceClient()
.getDatabaseAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new DatabaseAccountRegenerateKeyParameters().withKeyKind(keyKind));
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind) {
this.innerModel().withKind(kind);
return this;
}
@Override
public CosmosDBAccountImpl withKind(DatabaseAccountKind kind, Capability... capabilities) {
this.innerModel().withKind(kind);
this.innerModel().withCapabilities(Arrays.asList(capabilities));
return this;
}
@Override
public CosmosDBAccountImpl withDataModelSql() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelMongoDB() {
this.innerModel().withKind(DatabaseAccountKind.MONGO_DB);
return this;
}
@Override
public CosmosDBAccountImpl withDataModelCassandra() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableCassandra"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Cassandra");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelAzureTable() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableTable"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Table");
return this;
}
@Override
public CosmosDBAccountImpl withDataModelGremlin() {
this.innerModel().withKind(DatabaseAccountKind.GLOBAL_DOCUMENT_DB);
List<Capability> capabilities = new ArrayList<Capability>();
capabilities.add(new Capability().withName("EnableGremlin"));
this.innerModel().withCapabilities(capabilities);
this.withTag("defaultExperience", "Graph");
return this;
}
@Override
public CosmosDBAccountImpl withIpRangeFilter(String ipRangeFilter) {
    // Translates the legacy comma-separated filter string into the newer
    // IpAddressOrRange rule list. Fixed: a trailing or doubled comma used to
    // produce an empty-string rule; blank segments are now skipped.
    List<IpAddressOrRange> rules = new ArrayList<>();
    if (!CoreUtils.isNullOrEmpty(ipRangeFilter)) {
        for (String ip : ipRangeFilter.split(",")) {
            if (!ip.trim().isEmpty()) {
                // Segment value is passed through untrimmed to preserve the
                // caller's exact formatting for non-blank entries.
                rules.add(new IpAddressOrRange().withIpAddressOrRange(ip));
            }
        }
    }
    this.innerModel().withIpRules(rules);
    return this;
}
@Override
public CosmosDBAccountImpl withIpRules(List<IpAddressOrRange> ipRules) {
this.innerModel().withIpRules(ipRules);
return this;
}
@Override
protected Mono<DatabaseAccountGetResultsInner> getInnerAsync() {
return this.manager().serviceClient().getDatabaseAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public CosmosDBAccountImpl withWriteReplication(Region region) {
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withReadReplication(Region region) {
this.ensureFailoverIsInitialized();
FailoverPolicy failoverPolicyInner = new FailoverPolicy();
failoverPolicyInner.withLocationName(region.name());
failoverPolicyInner.withFailoverPriority(this.failoverPolicies.size());
this.hasFailoverPolicyChanges = true;
this.failoverPolicies.add(failoverPolicyInner);
return this;
}
@Override
public CosmosDBAccountImpl withoutReadReplication(Region region) {
    this.ensureFailoverIsInitialized();
    // Fixed: the original iterated forward and removed by index without
    // adjusting the index, so the element that shifted into the removed slot
    // was skipped. Iterating backwards removes every match safely.
    // Index 0 is the write region and is intentionally never removed.
    for (int i = this.failoverPolicies.size() - 1; i >= 1; i--) {
        FailoverPolicy policy = this.failoverPolicies.get(i);
        if (policy.locationName() != null
            && formatLocationName(policy.locationName()).equals(region.name())) {
            this.failoverPolicies.remove(i);
        }
    }
    return this;
}
@Override
public CosmosDBAccountImpl withEventualConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.EVENTUAL, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withSessionConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.SESSION, 0, 0);
return this;
}
@Override
public CosmosDBAccountImpl withBoundedStalenessConsistency(long maxStalenessPrefix, int maxIntervalInSeconds) {
this.setConsistencyPolicy(DefaultConsistencyLevel.BOUNDED_STALENESS, maxStalenessPrefix, maxIntervalInSeconds);
return this;
}
@Override
public CosmosDBAccountImpl withStrongConsistency() {
this.setConsistencyPolicy(DefaultConsistencyLevel.STRONG, 0, 0);
return this;
}
@Override
public PrivateEndpointConnectionImpl defineNewPrivateEndpointConnection(String name) {
return this.privateEndpointConnections.define(name);
}
@Override
public PrivateEndpointConnectionImpl updatePrivateEndpointConnection(String name) {
return this.privateEndpointConnections.update(name);
}
@Override
public CosmosDBAccountImpl withoutPrivateEndpointConnection(String name) {
this.privateEndpointConnections.remove(name);
return this;
}
CosmosDBAccountImpl withPrivateEndpointConnection(PrivateEndpointConnectionImpl privateEndpointConnection) {
this.privateEndpointConnections.addPrivateEndpointConnection(privateEndpointConnection);
return this;
}
@Override
public Mono<CosmosDBAccount> createResourceAsync() {
return this.doDatabaseUpdateCreate();
}
private DatabaseAccountCreateUpdateParameters createUpdateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountCreateUpdateParameters createUpdateParametersInner = new DatabaseAccountCreateUpdateParameters();
createUpdateParametersInner.withLocation(this.regionName().toLowerCase(Locale.ROOT));
createUpdateParametersInner.withConsistencyPolicy(inner.consistencyPolicy());
createUpdateParametersInner.withIpRules(inner.ipRules());
createUpdateParametersInner.withKind(inner.kind());
createUpdateParametersInner.withCapabilities(inner.capabilities());
createUpdateParametersInner.withTags(inner.tags());
createUpdateParametersInner.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
this
.addLocationsForParameters(
new CreateUpdateLocationParameters(createUpdateParametersInner), this.failoverPolicies);
createUpdateParametersInner.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
createUpdateParametersInner.withEnableCassandraConnector(inner.enableCassandraConnector());
createUpdateParametersInner.withConnectorOffer(inner.connectorOffer());
createUpdateParametersInner.withEnableAutomaticFailover(inner.enableAutomaticFailover());
createUpdateParametersInner.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (this.virtualNetworkRulesMap != null) {
createUpdateParametersInner
.withVirtualNetworkRules(new ArrayList<VirtualNetworkRule>(this.virtualNetworkRulesMap.values()));
this.virtualNetworkRulesMap = null;
}
return createUpdateParametersInner;
}
private DatabaseAccountUpdateParameters updateParametersInner(DatabaseAccountGetResultsInner inner) {
this.ensureFailoverIsInitialized();
DatabaseAccountUpdateParameters updateParameters = new DatabaseAccountUpdateParameters();
updateParameters.withTags(inner.tags());
updateParameters.withLocation(this.regionName().toLowerCase(Locale.ROOT));
updateParameters.withConsistencyPolicy(inner.consistencyPolicy());
updateParameters.withIpRules(inner.ipRules());
updateParameters.withIsVirtualNetworkFilterEnabled(inner.isVirtualNetworkFilterEnabled());
updateParameters.withEnableAutomaticFailover(inner.enableAutomaticFailover());
updateParameters.withCapabilities(inner.capabilities());
updateParameters.withEnableMultipleWriteLocations(inner.enableMultipleWriteLocations());
updateParameters.withEnableCassandraConnector(inner.enableCassandraConnector());
updateParameters.withConnectorOffer(inner.connectorOffer());
updateParameters.withDisableKeyBasedMetadataWriteAccess(inner.disableKeyBasedMetadataWriteAccess());
if (virtualNetworkRulesMap != null) {
updateParameters.withVirtualNetworkRules(new ArrayList<>(this.virtualNetworkRulesMap.values()));
virtualNetworkRulesMap = null;
}
this.addLocationsForParameters(new UpdateLocationParameters(updateParameters), this.failoverPolicies);
return updateParameters;
}
private static String fixDBName(String name) {
return name.toLowerCase(Locale.ROOT);
}
private void setConsistencyPolicy(
DefaultConsistencyLevel level, long maxStalenessPrefix, int maxIntervalInSeconds) {
ConsistencyPolicy policy = new ConsistencyPolicy();
policy.withDefaultConsistencyLevel(level);
if (level == DefaultConsistencyLevel.BOUNDED_STALENESS) {
policy.withMaxStalenessPrefix(maxStalenessPrefix);
policy.withMaxIntervalInSeconds(maxIntervalInSeconds);
}
this.innerModel().withConsistencyPolicy(policy);
}
private void addLocationsForParameters(HasLocations locationParameters, List<FailoverPolicy> failoverPolicies) {
List<Location> locations = new ArrayList<Location>();
if (failoverPolicies.size() > 0) {
for (int i = 0; i < failoverPolicies.size(); i++) {
FailoverPolicy policyInner = failoverPolicies.get(i);
Location location = new Location();
location.withFailoverPriority(i);
location.withLocationName(policyInner.locationName());
locations.add(location);
}
} else {
Location location = new Location();
location.withFailoverPriority(0);
location.withLocationName(locationParameters.location());
locations.add(location);
}
locationParameters.withLocations(locations);
}
private static String formatLocationName(String locationName) {
return locationName.replace(" ", "").toLowerCase(Locale.ROOT);
}
// Issues the create-or-update request, then repeatedly re-reads the account
// until provisioning reaches a final state and the observed locations match
// what was requested. Polling is driven by repeatWhenEmpty: every
// "not ready yet" condition maps to Mono.empty(), which schedules another
// poll after the client's default poll interval.
private Mono<CosmosDBAccount> doDatabaseUpdateCreate() {
final CosmosDBAccountImpl self = this;
// Single-element mutable holder for accumulated wait, stepped in units of 30
// (presumably seconds, matching MAX_DELAY_DUE_TO_MISSING_FAILOVERS = 600 —
// TODO confirm); a List is used so the polling lambda below can mutate it.
final List<Integer> data = new ArrayList<Integer>();
data.add(0);
Mono<DatabaseAccountGetResultsInner> request = null;
HasLocations locationParameters = null;
if (isInCreateMode()) {
final DatabaseAccountCreateUpdateParameters createUpdateParametersInner =
this.createUpdateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.createOrUpdateAsync(resourceGroupName(), name(), createUpdateParametersInner);
locationParameters = new CreateUpdateLocationParameters(createUpdateParametersInner);
} else {
final DatabaseAccountUpdateParameters updateParametersInner = this.updateParametersInner(this.innerModel());
request =
this
.manager()
.serviceClient()
.getDatabaseAccounts()
.updateAsync(resourceGroupName(), name(), updateParametersInner);
locationParameters = new UpdateLocationParameters(updateParametersInner);
}
// Normalized names of every requested location; used to decide when the
// service-side state has converged.
Set<String> locations = locationParameters.locations().stream()
.map(location -> formatLocationName(location.locationName()))
.collect(Collectors.toSet());
return request
.flatMap(
databaseAccountInner -> {
// Local failover edits were sent with the request; reset them so a
// later update starts again from the service state.
self.failoverPolicies.clear();
self.hasFailoverPolicyChanges = false;
return manager()
.databaseAccounts()
.getByResourceGroupAsync(resourceGroupName(), name())
.flatMap(
databaseAccount -> {
// Until the accumulated wait passes MAX_DELAY_DUE_TO_MISSING_FAILOVERS,
// keep polling while the account id is absent or the failover-policy
// count has not yet caught up with the requested location count.
if (MAX_DELAY_DUE_TO_MISSING_FAILOVERS > data.get(0)
&& (databaseAccount.id() == null
|| databaseAccount.id().length() == 0
|| locations.size()
!= databaseAccount.innerModel().failoverPolicies().size())) {
return Mono.empty();
}
if (isAFinalProvisioningState(databaseAccount.innerModel().provisioningState())) {
// Every readable replica must itself be in a final state and be
// one of the requested locations before the result is accepted.
for (Location location : databaseAccount.readableReplications()) {
if (!isAFinalProvisioningState(location.provisioningState())) {
return Mono.empty();
}
if (!locations.contains(formatLocationName(location.locationName()))) {
return Mono.empty();
}
}
} else {
return Mono.empty();
}
// Converged: adopt the freshly fetched inner model and complete.
self.setInner(databaseAccount.innerModel());
return Mono.just(databaseAccount);
})
.repeatWhenEmpty(
longFlux ->
longFlux
.flatMap(
index -> {
// Advance the accumulated-wait counter and delay the next
// poll by the client's default poll interval.
data.set(0, data.get(0) + 30);
return Mono.delay(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(
manager().serviceClient().getDefaultPollInterval()));
}));
});
}
private void ensureFailoverIsInitialized() {
    // Nothing to seed in create mode; the caller supplies policies directly.
    if (this.isInCreateMode()) {
        return;
    }
    // Seed the local policy list from the service state exactly once per
    // update cycle, ordered by failover priority (write region first).
    if (!this.hasFailoverPolicyChanges) {
        this.failoverPolicies.clear();
        List<FailoverPolicy> byPriority = new ArrayList<>(this.innerModel().failoverPolicies());
        byPriority.sort(Comparator.comparing(FailoverPolicy::failoverPriority));
        this.failoverPolicies.addAll(byPriority);
        this.hasFailoverPolicyChanges = true;
    }
}
// Returns true when the given provisioning state is terminal.
// Fixed: the state string can be absent while provisioning is still in
// flight (see the polling loop, which calls this with the raw service
// value); a null state now means "not final" instead of throwing NPE.
private boolean isAFinalProvisioningState(String state) {
    if (state == null) {
        return false;
    }
    switch (state.toLowerCase(Locale.ROOT)) {
        case "succeeded":
        case "canceled":
        case "failed":
            return true;
        default:
            return false;
    }
}
// Lazily builds the id -> rule map from the inner model's current VNET rules.
// Wither methods mutate this map; it is flushed into the create/update
// parameters and reset to null when the request payload is built (see
// createUpdateParametersInner / updateParametersInner).
private Map<String, VirtualNetworkRule> ensureVirtualNetworkRules() {
if (this.virtualNetworkRulesMap == null) {
this.virtualNetworkRulesMap = new HashMap<>();
if (this.innerModel() != null && this.innerModel().virtualNetworkRules() != null) {
for (VirtualNetworkRule virtualNetworkRule : this.innerModel().virtualNetworkRules()) {
this.virtualNetworkRulesMap.put(virtualNetworkRule.id(), virtualNetworkRule);
}
}
}
return this.virtualNetworkRulesMap;
}
@Override
public CosmosDBAccountImpl withVirtualNetwork(String virtualNetworkId, String subnetName) {
this.innerModel().withIsVirtualNetworkFilterEnabled(true);
String vnetId = virtualNetworkId + "/subnets/" + subnetName;
ensureVirtualNetworkRules().put(vnetId, new VirtualNetworkRule().withId(vnetId));
return this;
}
@Override
public CosmosDBAccountImpl withoutVirtualNetwork(String virtualNetworkId, String subnetName) {
    // Removes the rule for the given subnet; when the last rule goes away the
    // VNET filter itself is switched off. (size() == 0 replaced with the
    // idiomatic isEmpty().)
    Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
    vnetRules.remove(virtualNetworkId + "/subnets/" + subnetName);
    if (vnetRules.isEmpty()) {
        this.innerModel().withIsVirtualNetworkFilterEnabled(false);
    }
    return this;
}
@Override
public CosmosDBAccountImpl withVirtualNetworkRules(List<VirtualNetworkRule> virtualNetworkRules) {
    // NOTE(review): non-empty input is merged into the existing map (keyed by
    // rule id) rather than replacing it wholesale — confirm this matches the
    // intended setter semantics.
    Map<String, VirtualNetworkRule> vnetRules = ensureVirtualNetworkRules();
    if (virtualNetworkRules == null || virtualNetworkRules.isEmpty()) {
        // Clearing all rules also disables the VNET filter.
        vnetRules.clear();
        this.innerModel().withIsVirtualNetworkFilterEnabled(false);
        return this;
    }
    this.innerModel().withIsVirtualNetworkFilterEnabled(true);
    // Consistency fix: use the local handle returned by
    // ensureVirtualNetworkRules() (same object as the field) instead of
    // re-reading this.virtualNetworkRulesMap directly.
    for (VirtualNetworkRule vnetRule : virtualNetworkRules) {
        vnetRules.put(vnetRule.id(), vnetRule);
    }
    return this;
}
@Override
public CosmosDBAccountImpl withMultipleWriteLocationsEnabled(boolean enabled) {
this.innerModel().withEnableMultipleWriteLocations(enabled);
return this;
}
@Override
public CosmosDBAccountImpl withCassandraConnector(ConnectorOffer connectorOffer) {
this.innerModel().withEnableCassandraConnector(true);
this.innerModel().withConnectorOffer(connectorOffer);
return this;
}
@Override
public CosmosDBAccountImpl withoutCassandraConnector() {
this.innerModel().withEnableCassandraConnector(false);
this.innerModel().withConnectorOffer(null);
return this;
}
@Override
public CosmosDBAccountImpl withDisableKeyBaseMetadataWriteAccess(boolean disabled) {
this.innerModel().withDisableKeyBasedMetadataWriteAccess(disabled);
return this;
}
interface HasLocations {
String location();
List<Location> locations();
void withLocations(List<Location> locations);
}
static class CreateUpdateLocationParameters implements HasLocations {
private DatabaseAccountCreateUpdateParameters parameters;
CreateUpdateLocationParameters(DatabaseAccountCreateUpdateParameters parametersObject) {
parameters = parametersObject;
}
@Override
public String location() {
return parameters.location();
}
@Override
public List<Location> locations() {
return parameters.locations();
}
@Override
public void withLocations(List<Location> locations) {
parameters.withLocations(locations);
}
}
static class UpdateLocationParameters implements HasLocations {
private DatabaseAccountUpdateParameters parameters;
UpdateLocationParameters(DatabaseAccountUpdateParameters parametersObject) {
parameters = parametersObject;
}
@Override
public String location() {
return parameters.location();
}
@Override
public List<Location> locations() {
return parameters.locations();
}
@Override
public void withLocations(List<Location> locations) {
parameters.withLocations(locations);
}
}
} |
We should use `Configuration` so we can pass `STORAGE_CONNECTION_STRING` via system properties or environment variables | public ServiceTest(TOptions options) {
super(options);
String connectionString = System.getenv("STORAGE_CONNECTION_STRING");
if (CoreUtils.isNullOrEmpty(connectionString)) {
System.out.println("Environment variable STORAGE_CONNECTION_STRING must be set");
System.exit(1);
}
shareServiceClient = new ShareServiceClientBuilder().connectionString(connectionString).
buildClient();
shareServiceAsyncClient = new ShareServiceClientBuilder().connectionString(connectionString).
buildAsyncClient();
} | String connectionString = System.getenv("STORAGE_CONNECTION_STRING"); | public ServiceTest(TOptions options) {
super(options);
configuration = Configuration.getGlobalConfiguration().clone();
String connectionString = configuration.get("STORAGE_CONNECTION_STRING");
if (CoreUtils.isNullOrEmpty(connectionString)) {
throw new IllegalStateException("Environment variable STORAGE_CONNECTION_STRING must be set");
}
shareServiceClient = new ShareServiceClientBuilder().connectionString(connectionString).
buildClient();
shareServiceAsyncClient = new ShareServiceClientBuilder().connectionString(connectionString).
buildAsyncClient();
} | class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> {
protected final ShareServiceClient shareServiceClient;
protected final ShareServiceAsyncClient shareServiceAsyncClient;
} | class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> {
protected static final long MAX_SHARE_SIZE = 4398046511104L;
protected final ShareServiceClient shareServiceClient;
protected final ShareServiceAsyncClient shareServiceAsyncClient;
private final Configuration configuration;
} |
Do we have to upload as part of setup? | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then(dataLakeFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), null, true))
.then();
} | .then(dataLakeFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), null, true)) | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final int BUFFER_SIZE = 16 * 1024 * 1024;
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
private final byte[] buffer = new byte[BUFFER_SIZE];
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
}
@Override
public void run() {
    // Synchronous append of a freshly generated random stream of the
    // configured payload size.
    long size = options.getSize();
    dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(size), 0, size);
}
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize());
}
@Override
// Added the missing @Override: the method overrides the base-class hook
// (proven by the super.globalCleanupAsync() call below).
public Mono<Void> globalCleanupAsync() {
    // Delete the per-run test file, then run the base-class cleanup.
    return dataLakeFileAsyncClient.delete()
        .then(super.globalCleanupAsync())
        .then();
}
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
protected final RepeatingInputStream inputStream;
protected final Flux<ByteBuffer> byteBufferFlux;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
inputStream = (RepeatingInputStream) TestDataCreationHelper.createRandomInputStream(options.getSize());
byteBufferFlux = createRandomByteBufferFlux(options.getSize());
}
@Override
public void run() {
inputStream.reset();
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
}
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(byteBufferFlux, 0, options.getSize());
}
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} |
Probably not, assuming we can append immediately after create. | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then(dataLakeFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), null, true))
.then();
} | .then(dataLakeFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), null, true)) | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final int BUFFER_SIZE = 16 * 1024 * 1024;
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
private final byte[] buffer = new byte[BUFFER_SIZE];
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
}
@Override
public void run() {
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()), 0, options.getSize());
}
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize());
}
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
protected final RepeatingInputStream inputStream;
protected final Flux<ByteBuffer> byteBufferFlux;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
inputStream = (RepeatingInputStream) TestDataCreationHelper.createRandomInputStream(options.getSize());
byteBufferFlux = createRandomByteBufferFlux(options.getSize());
}
@Override
public void run() {
inputStream.reset();
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
}
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(byteBufferFlux, 0, options.getSize());
}
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} |
Should this return `0` or `-1`? | public int read() throws IOException {
return 0;
} | return 0; | public int read() throws IOException {
return -1;
} | class NullInputStream extends InputStream {
@Override
} | class NullInputStream extends InputStream {
@Override
} |
I think this could be slightly optimized by creating the input stream once and resetting before each operation: ```java protected final InputStream inputStream; public AppendFileDatalakeTest(PerfStressOptions options) { inputStream = TestDataCreationHelper.createRandomInputStream(options.getSize()); inputStream.mark(Integer.MAX_VALUE); } public void run() { inputStream.reset(); dataLakeFileClient.append(inputStream, 0, options.getSize()); } ``` | public void run() {
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
} | dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()), | public void run() {
inputStream.reset();
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
}
@Override
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize());
}
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
protected final RepeatingInputStream inputStream;
protected final Flux<ByteBuffer> byteBufferFlux;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
inputStream = (RepeatingInputStream) TestDataCreationHelper.createRandomInputStream(options.getSize());
byteBufferFlux = createRandomByteBufferFlux(options.getSize());
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
}
@Override
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(byteBufferFlux, 0, options.getSize());
}
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} |
@alzimmermsft: Do you know if the `randomByteBufferFlux` could be reused in the same way as the `InputStream` above? Or does each call to `runAsync()` require creating a new `randomByteBufferFlux`? | public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize());
} | return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize()); | public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(byteBufferFlux, 0, options.getSize());
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
}
@Override
public void run() {
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
}
@Override
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
protected final RepeatingInputStream inputStream;
protected final Flux<ByteBuffer> byteBufferFlux;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
inputStream = (RepeatingInputStream) TestDataCreationHelper.createRandomInputStream(options.getSize());
byteBufferFlux = createRandomByteBufferFlux(options.getSize());
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
}
@Override
public void run() {
inputStream.reset();
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
}
@Override
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} |
Reuse input stream. | public void run() {
dataLakeFileClient.upload(createRandomInputStream(options.getSize()), options.getSize(), true);
} | dataLakeFileClient.upload(createRandomInputStream(options.getSize()), options.getSize(), true); | public void run() {
inputStream.reset();
dataLakeFileClient.upload(inputStream, options.getSize(), true);
} | class UploadFileDatalakeTest extends FileTestBase<PerfStressOptions> {
public UploadFileDatalakeTest(PerfStressOptions options) {
super(options);
}
@Override
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), null, true)
.then();
}
} | class UploadFileDatalakeTest extends FileTestBase<PerfStressOptions> {
protected final RepeatingInputStream inputStream;
protected final Flux<ByteBuffer> byteBufferFlux;
public UploadFileDatalakeTest(PerfStressOptions options) {
super(options);
inputStream = (RepeatingInputStream) TestDataCreationHelper.createRandomInputStream(options.getSize());
byteBufferFlux = createRandomByteBufferFlux(options.getSize());
}
@Override
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), null, true)
.then();
}
} |
The perf impact should be negligible, but as a principle it would be better to call `TEMP_FILE.toString()` once and store the result in a variable, rather than calling it every iteration. | public void run() {
dataLakeFileClient.uploadFromFile(TEMP_FILE.toString(), true);
} | dataLakeFileClient.uploadFromFile(TEMP_FILE.toString(), true); | public void run() {
dataLakeFileClient.uploadFromFile(TEMP_FILE_PATH, true);
} | class UploadFromFileDatalakeTest extends FileTestBase<PerfStressOptions> {
private static final Path TEMP_FILE;
static {
try {
TEMP_FILE = Files.createTempFile(null, null);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public UploadFromFileDatalakeTest(PerfStressOptions options) {
super(options);
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(createTempFile());
}
@Override
public Mono<Void> globalCleanupAsync() {
return deleteTempFile().then(super.globalCleanupAsync());
}
private Mono<Void> createTempFile() {
try (InputStream inputStream = createRandomInputStream(options.getSize());
OutputStream outputStream = new FileOutputStream(TEMP_FILE.toString())) {
copyStream(inputStream, outputStream);
return Mono.empty();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private Mono<Void> deleteTempFile() {
try {
Files.delete(TEMP_FILE);
return Mono.empty();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.uploadFromFile(TEMP_FILE.toString(), true);
}
} | class UploadFromFileDatalakeTest extends FileTestBase<PerfStressOptions> {
private static final Path TEMP_FILE;
private static final String TEMP_FILE_PATH;
static {
try {
TEMP_FILE = Files.createTempFile(null, null);
TEMP_FILE_PATH = TEMP_FILE.toString();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public UploadFromFileDatalakeTest(PerfStressOptions options) {
super(options);
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(createTempFile());
}
@Override
public Mono<Void> globalCleanupAsync() {
return deleteTempFile().then(super.globalCleanupAsync());
}
private Mono<Void> createTempFile() {
return Mono.fromCallable(() -> {
TestDataCreationHelper.writeToFile(TEMP_FILE_PATH, options.getSize(), DEFAULT_BUFFER_SIZE);
return 1;
}).then();
}
private Mono<Void> deleteTempFile() {
try {
Files.delete(TEMP_FILE);
return Mono.empty();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.uploadFromFile(TEMP_FILE_PATH, true);
}
} |
I recommend adding a helper method in the PerfTest base class to get an env var and throw if not set. Example in .NET: https://github.com/Azure/azure-sdk-for-net/blob/master/common/Perf/Azure.Test.Perf/PerfTest.cs#L67-L75 | public ServiceTest(TOptions options) {
super(options);
String connectionString = System.getenv("STORAGE_CONNECTION_STRING");
if (CoreUtils.isNullOrEmpty(connectionString)) {
throw new IllegalStateException("Environment variable STORAGE_CONNECTION_STRING must be set");
}
StorageConnectionString storageConnectionString
= StorageConnectionString.create(connectionString, null);
StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
dataLakeServiceClient = new DataLakeServiceClientBuilder()
.endpoint(endpoint.getPrimaryUri())
.credential(new StorageSharedKeyCredential(storageConnectionString.getAccountName(),
storageConnectionString.getStorageAuthSettings().getAccount().getAccessKey()))
.buildClient();
dataLakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(endpoint.getPrimaryUri())
.credential(new StorageSharedKeyCredential(storageConnectionString.getAccountName(),
storageConnectionString.getStorageAuthSettings().getAccount().getAccessKey()))
.buildAsyncClient();
} | String connectionString = System.getenv("STORAGE_CONNECTION_STRING"); | public ServiceTest(TOptions options) {
super(options);
configuration = Configuration.getGlobalConfiguration().clone();
String connectionString = configuration.get("STORAGE_CONNECTION_STRING");
if (CoreUtils.isNullOrEmpty(connectionString)) {
throw new IllegalStateException("Environment variable STORAGE_CONNECTION_STRING must be set");
}
StorageConnectionString storageConnectionString
= StorageConnectionString.create(connectionString, null);
StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
dataLakeServiceClient = new DataLakeServiceClientBuilder()
.endpoint(endpoint.getPrimaryUri())
.credential(new StorageSharedKeyCredential(storageConnectionString.getAccountName(),
storageConnectionString.getStorageAuthSettings().getAccount().getAccessKey()))
.buildClient();
dataLakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(endpoint.getPrimaryUri())
.credential(new StorageSharedKeyCredential(storageConnectionString.getAccountName(),
storageConnectionString.getStorageAuthSettings().getAccount().getAccessKey()))
.buildAsyncClient();
} | class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> {
protected final DataLakeServiceClient dataLakeServiceClient;
protected final DataLakeServiceAsyncClient dataLakeServiceAsyncClient;
} | class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> {
protected final DataLakeServiceClient dataLakeServiceClient;
protected final DataLakeServiceAsyncClient dataLakeServiceAsyncClient;
private final Configuration configuration;
} |
Each instance of this test needs to write to a unique file, but within an instance I believe each operation can write to the same file. So I think you can create the file in `setupAsync()`, save the name in an instance variable, then delete the file in `cleanupAsync()`. I slightly prefer deleting in `cleanupAsync()` as opposed to using `deleteOnExit()` since it's more similar to other languages and will also allow us to see errors during deletion if it fails for some reason. | public void run() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
shareFileClient.downloadToFile(file.getAbsolutePath());
} | File file = new File(UUID.randomUUID().toString()); | public void run() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
shareFileClient.downloadToFile(file.getAbsolutePath());
} | class DownloadToFileShareTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final ShareFileClient shareFileClient;
protected final ShareFileAsyncClient shareFileAsyncClient;
public DownloadToFileShareTest(PerfStressOptions options) {
super(options);
shareFileClient = shareDirectoryClient.getFileClient(FILE_NAME);
shareFileAsyncClient = shareDirectoryAsyncClient.getFileClient(FILE_NAME);
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(shareFileAsyncClient.create(options.getSize()))
.then(shareFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), options.getSize()))
.then();
}
@Override
@Override
public Mono<Void> runAsync() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
return shareFileAsyncClient.downloadToFile(file.getAbsolutePath()).then();
}
public Mono<Void> globalCleanupAsync() {
return shareFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} | class DownloadToFileShareTest extends DirectoryTest<PerfStressOptions> {
protected final ShareFileClient shareFileClient;
protected final ShareFileAsyncClient shareFileAsyncClient;
public DownloadToFileShareTest(PerfStressOptions options) {
super(options);
String fileName = "perfstressdfile" + UUID.randomUUID().toString();
shareFileClient = shareDirectoryClient.getFileClient(fileName);
shareFileAsyncClient = shareDirectoryAsyncClient.getFileClient(fileName);
}
public Mono<Void> setupAsync() {
return super.setupAsync()
.then(shareFileAsyncClient.create(options.getSize()))
.then(shareFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), options.getSize()))
.then();
}
@Override
@Override
public Mono<Void> runAsync() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
return shareFileAsyncClient.downloadToFile(file.getAbsolutePath()).then();
}
} |
Re-use NullOutputStream | public void run() {
try {
cloudBlockBlob.download(new NullOutputStream());
} catch (StorageException e) {
throw new RuntimeException(e);
}
} | cloudBlockBlob.download(new NullOutputStream()); | public void run() {
try {
cloudBlockBlob.download(DEV_NULL);
} catch (StorageException e) {
throw new RuntimeException(e);
}
} | class DownloadBlobTest extends ContainerTest<PerfStressOptions> {
private final CloudBlockBlob cloudBlockBlob;
public DownloadBlobTest(PerfStressOptions options) {
super(options);
try {
cloudBlockBlob = cloudBlobContainer.getBlockBlobReference("downloadtest");
} catch (URISyntaxException | StorageException e) {
throw new RuntimeException(e);
}
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(Mono.fromCallable(() -> {
try {
cloudBlockBlob.upload(TestDataCreationHelper
.createRandomInputStream(options.getSize()), options.getSize());
} catch (StorageException | IOException e) {
throw new RuntimeException(e);
}
return 1;
})).then();
}
@Override
@Override
public Mono<Void> runAsync() {
throw new UnsupportedOperationException();
}
} | class DownloadBlobTest extends ContainerTest<PerfStressOptions> {
private final CloudBlockBlob cloudBlockBlob;
private static final OutputStream DEV_NULL = new NullOutputStream();
public DownloadBlobTest(PerfStressOptions options) {
super(options);
try {
cloudBlockBlob = cloudBlobContainer.getBlockBlobReference("downloadtest");
} catch (URISyntaxException | StorageException e) {
throw new RuntimeException(e);
}
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(Mono.fromCallable(() -> {
try {
cloudBlockBlob.upload(TestDataCreationHelper
.createRandomInputStream(options.getSize()), options.getSize());
} catch (StorageException | IOException e) {
throw new RuntimeException(e);
}
return 1;
})).then();
}
@Override
@Override
public Mono<Void> runAsync() {
throw new UnsupportedOperationException();
}
} |
Cache temp file string. | public void run() {
try {
cloudBlockBlob.uploadFromFile(tempFile.toString());
} catch (StorageException | IOException e) {
throw new RuntimeException(e);
}
} | cloudBlockBlob.uploadFromFile(tempFile.toString()); | public void run() {
try {
cloudBlockBlob.uploadFromFile(TEMP_FILE_PATH);
} catch (StorageException | IOException e) {
throw new RuntimeException(e);
}
} | class UploadFromFileTest extends BlobTestBase<PerfStressOptions> {
private static Path tempFile;
public UploadFromFileTest(PerfStressOptions options) {
super(options);
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(createTempFile());
}
@Override
public Mono<Void> globalCleanupAsync() {
return deleteTempFile().then(super.globalCleanupAsync());
}
private Mono<Void> createTempFile() {
return Mono.fromCallable(() -> {
InputStream inputStream = null;
OutputStream outputStream = null;
try {
tempFile = Files.createTempFile(null, null);
inputStream = TestDataCreationHelper.createRandomInputStream(options.getSize());
outputStream = new FileOutputStream(tempFile.toString());
TestDataCreationHelper.copyStream(inputStream, outputStream, DEFAULT_BUFFER_SIZE);
return 1;
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
if (outputStream != null) {
outputStream.close();
}
if (inputStream != null) {
inputStream.close();
}
}
}).then();
}
private Mono<Void> deleteTempFile() {
try {
Files.delete(tempFile);
return Mono.empty();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
@Override
public Mono<Void> runAsync() {
throw new UnsupportedOperationException();
}
} | class UploadFromFileTest extends BlobTestBase<PerfStressOptions> {
private static final Path TEMP_FILE;
private static final String TEMP_FILE_PATH;
static {
try {
TEMP_FILE = Files.createTempFile(null, null);
TEMP_FILE_PATH = TEMP_FILE.toString();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public UploadFromFileTest(PerfStressOptions options) {
super(options);
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(createTempFile());
}
@Override
public Mono<Void> globalCleanupAsync() {
return deleteTempFile().then(super.globalCleanupAsync());
}
private Mono<Void> createTempFile() {
return Mono.fromCallable(() -> {
TestDataCreationHelper.writeToFile(TEMP_FILE_PATH, options.getSize(), DEFAULT_BUFFER_SIZE);
return 1;
}).then();
}
private Mono<Void> deleteTempFile() {
try {
Files.delete(TEMP_FILE);
return Mono.empty();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
@Override
public Mono<Void> runAsync() {
throw new UnsupportedOperationException();
}
} |
Cache input stream | public void run() {
try {
InputStream inputStream = TestDataCreationHelper.createRandomInputStream(options.getSize());
BlobOutputStream outputStream = cloudBlockBlob.openOutputStream();
TestDataCreationHelper.copyStream(inputStream, outputStream, DEFAULT_BUFFER_SIZE);
outputStream.close();
} catch (StorageException | IOException e) {
throw new RuntimeException(e);
}
} | InputStream inputStream = TestDataCreationHelper.createRandomInputStream(options.getSize()); | public void run() {
try {
BlobOutputStream outputStream = cloudBlockBlob.openOutputStream();
TestDataCreationHelper.writeBytesToOutputStream(outputStream, options.getSize());
outputStream.close();
} catch (StorageException | IOException e) {
throw new RuntimeException(e);
}
} | class UploadOutputStreamTest extends BlobTestBase<PerfStressOptions> {
public UploadOutputStreamTest(PerfStressOptions options) {
super(options);
}
@Override
@Override
public Mono<Void> runAsync() {
throw new UnsupportedOperationException();
}
} | class UploadOutputStreamTest extends BlobTestBase<PerfStressOptions> {
public UploadOutputStreamTest(PerfStressOptions options) {
super(options);
}
@Override
@Override
public Mono<Void> runAsync() {
throw new UnsupportedOperationException();
}
} |
Re-use temp file | public void run() {
try {
File tempFile = File.createTempFile("tempFile", "fileshare");
tempFile.deleteOnExit();
cloudFile.downloadToFile(tempFile.getAbsolutePath());
} catch (StorageException | IOException e) {
throw new RuntimeException(e);
}
} | File tempFile = File.createTempFile("tempFile", "fileshare"); | public void run() {
try {
cloudFile.downloadToFile(targetFilePath);
} catch (StorageException | IOException e) {
throw new RuntimeException(e);
}
} | class DownloadToFileShareTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
private final CloudFile cloudFile;
public DownloadToFileShareTest(PerfStressOptions options) {
super(options);
try {
cloudFile = cloudFileDirectory.getFileReference(FILE_NAME);
} catch (URISyntaxException | StorageException e) {
throw new RuntimeException(e);
}
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(Mono.fromCallable(() -> {
try {
cloudFile.upload(TestDataCreationHelper.createRandomInputStream(options.getSize()),
options.getSize());
return 1;
} catch (URISyntaxException | StorageException | IOException e) {
throw new RuntimeException(e);
}
})).then();
}
@Override
@Override
public Mono<Void> runAsync() {
throw new UnsupportedOperationException();
}
@Override
public Mono<Void> globalCleanupAsync() {
return Mono.fromCallable(() -> {
cloudFile.delete();
return 1;
}).then(super.globalCleanupAsync());
}
} | class DownloadToFileShareTest extends DirectoryTest<PerfStressOptions> {
private final File targetFile;
private final String targetFilePath;
private final CloudFile cloudFile;
public DownloadToFileShareTest(PerfStressOptions options) {
super(options);
try {
String fileName = "perfstress-file-" + UUID.randomUUID().toString();
targetFile = new File(UUID.randomUUID().toString());
targetFilePath = targetFile.getAbsolutePath();
cloudFile = cloudFileDirectory.getFileReference(fileName);
} catch (URISyntaxException | StorageException e) {
throw new RuntimeException(e);
}
}
@Override
public Mono<Void> setupAsync() {
return super.setupAsync()
.then(Mono.fromCallable(() -> {
try {
cloudFile.create(options.getSize());
cloudFile.upload(TestDataCreationHelper.createRandomInputStream(options.getSize()),
options.getSize());
return 1;
} catch (URISyntaxException | StorageException | IOException e) {
throw new RuntimeException(e);
}
})).then();
}
@Override
@Override
public Mono<Void> cleanupAsync() {
return Mono.fromCallable(() -> {
targetFile.delete();
return 1;
}).then(super.cleanupAsync());
}
@Override
public Mono<Void> runAsync() {
throw new UnsupportedOperationException();
}
} |
`randomByteBufferFlux` is safe to re-use as it'll create window `ByteBuffer`s each time it is consumed. | public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize());
} | return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize()); | public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(byteBufferFlux, 0, options.getSize());
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
}
@Override
public void run() {
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
}
@Override
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
protected final RepeatingInputStream inputStream;
protected final Flux<ByteBuffer> byteBufferFlux;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
inputStream = (RepeatingInputStream) TestDataCreationHelper.createRandomInputStream(options.getSize());
byteBufferFlux = createRandomByteBufferFlux(options.getSize());
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
}
@Override
public void run() {
inputStream.reset();
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
}
@Override
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} |
@g2vinay: Let's try reusing the `randomByteBufferFlux`. | public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize());
} | return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize()); | public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(byteBufferFlux, 0, options.getSize());
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
}
@Override
public void run() {
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
}
@Override
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
protected final RepeatingInputStream inputStream;
protected final Flux<ByteBuffer> byteBufferFlux;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
inputStream = (RepeatingInputStream) TestDataCreationHelper.createRandomInputStream(options.getSize());
byteBufferFlux = createRandomByteBufferFlux(options.getSize());
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
}
@Override
public void run() {
inputStream.reset();
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
}
@Override
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} |
We already have it in azure-core, updated to use that. | public ServiceTest(TOptions options) {
super(options);
String connectionString = System.getenv("STORAGE_CONNECTION_STRING");
if (CoreUtils.isNullOrEmpty(connectionString)) {
throw new IllegalStateException("Environment variable STORAGE_CONNECTION_STRING must be set");
}
StorageConnectionString storageConnectionString
= StorageConnectionString.create(connectionString, null);
StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
dataLakeServiceClient = new DataLakeServiceClientBuilder()
.endpoint(endpoint.getPrimaryUri())
.credential(new StorageSharedKeyCredential(storageConnectionString.getAccountName(),
storageConnectionString.getStorageAuthSettings().getAccount().getAccessKey()))
.buildClient();
dataLakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(endpoint.getPrimaryUri())
.credential(new StorageSharedKeyCredential(storageConnectionString.getAccountName(),
storageConnectionString.getStorageAuthSettings().getAccount().getAccessKey()))
.buildAsyncClient();
} | String connectionString = System.getenv("STORAGE_CONNECTION_STRING"); | public ServiceTest(TOptions options) {
super(options);
configuration = Configuration.getGlobalConfiguration().clone();
String connectionString = configuration.get("STORAGE_CONNECTION_STRING");
if (CoreUtils.isNullOrEmpty(connectionString)) {
throw new IllegalStateException("Environment variable STORAGE_CONNECTION_STRING must be set");
}
StorageConnectionString storageConnectionString
= StorageConnectionString.create(connectionString, null);
StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
dataLakeServiceClient = new DataLakeServiceClientBuilder()
.endpoint(endpoint.getPrimaryUri())
.credential(new StorageSharedKeyCredential(storageConnectionString.getAccountName(),
storageConnectionString.getStorageAuthSettings().getAccount().getAccessKey()))
.buildClient();
dataLakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(endpoint.getPrimaryUri())
.credential(new StorageSharedKeyCredential(storageConnectionString.getAccountName(),
storageConnectionString.getStorageAuthSettings().getAccount().getAccessKey()))
.buildAsyncClient();
} | class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> {
protected final DataLakeServiceClient dataLakeServiceClient;
protected final DataLakeServiceAsyncClient dataLakeServiceAsyncClient;
} | class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> {
protected final DataLakeServiceClient dataLakeServiceClient;
protected final DataLakeServiceAsyncClient dataLakeServiceAsyncClient;
private final Configuration configuration;
} |
The API expects the file to not exist already, so we can't use a single file per thread here. The Track 1 API can reuse the same file, so I'll file an issue here for the storage team to look at. | public void run() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
shareFileClient.downloadToFile(file.getAbsolutePath());
} | File file = new File(UUID.randomUUID().toString()); | public void run() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
shareFileClient.downloadToFile(file.getAbsolutePath());
} | class DownloadToFileShareTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final ShareFileClient shareFileClient;
protected final ShareFileAsyncClient shareFileAsyncClient;
public DownloadToFileShareTest(PerfStressOptions options) {
super(options);
shareFileClient = shareDirectoryClient.getFileClient(FILE_NAME);
shareFileAsyncClient = shareDirectoryAsyncClient.getFileClient(FILE_NAME);
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(shareFileAsyncClient.create(options.getSize()))
.then(shareFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), options.getSize()))
.then();
}
@Override
@Override
public Mono<Void> runAsync() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
return shareFileAsyncClient.downloadToFile(file.getAbsolutePath()).then();
}
public Mono<Void> globalCleanupAsync() {
return shareFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} | class DownloadToFileShareTest extends DirectoryTest<PerfStressOptions> {
protected final ShareFileClient shareFileClient;
protected final ShareFileAsyncClient shareFileAsyncClient;
public DownloadToFileShareTest(PerfStressOptions options) {
super(options);
String fileName = "perfstressdfile" + UUID.randomUUID().toString();
shareFileClient = shareDirectoryClient.getFileClient(fileName);
shareFileAsyncClient = shareDirectoryAsyncClient.getFileClient(fileName);
}
public Mono<Void> setupAsync() {
return super.setupAsync()
.then(shareFileAsyncClient.create(options.getSize()))
.then(shareFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), options.getSize()))
.then();
}
@Override
@Override
public Mono<Void> runAsync() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
return shareFileAsyncClient.downloadToFile(file.getAbsolutePath()).then();
}
} |
For the Datalake append API, reusing the byte buffer flux doesn't work; for other APIs, it works. I'll file an issue for this. | public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize());
} | return dataLakeFileAsyncClient.append(createRandomByteBufferFlux(options.getSize()), 0, options.getSize()); | public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.append(byteBufferFlux, 0, options.getSize());
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
}
@Override
public void run() {
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
}
@Override
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} | class AppendFileDatalakeTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final DataLakeFileClient dataLakeFileClient;
protected final DataLakeFileAsyncClient dataLakeFileAsyncClient;
protected final RepeatingInputStream inputStream;
protected final Flux<ByteBuffer> byteBufferFlux;
public AppendFileDatalakeTest(PerfStressOptions options) {
super(options);
dataLakeFileClient = dataLakeDirectoryClient.getFileClient(FILE_NAME);
dataLakeFileAsyncClient = dataLakeDirectoryAsyncClient.getFileAsyncClient(FILE_NAME);
inputStream = (RepeatingInputStream) TestDataCreationHelper.createRandomInputStream(options.getSize());
byteBufferFlux = createRandomByteBufferFlux(options.getSize());
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(dataLakeFileAsyncClient.create())
.then();
}
@Override
public void run() {
inputStream.reset();
dataLakeFileClient.append(TestDataCreationHelper.createRandomInputStream(options.getSize()),
0, options.getSize());
}
@Override
public Mono<Void> globalCleanupAsync() {
return dataLakeFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} |
In the other languages we have been adding a helper function to the base class for this: https://github.com/Azure/azure-sdk-for-net/blob/master/common/Perf/Azure.Test.Perf/PerfTest.cs#L67 | public ServiceTest(TOptions options) {
super(options);
configuration = Configuration.getGlobalConfiguration().clone();
String connectionString = configuration.get("STORAGE_CONNECTION_STRING");
if (CoreUtils.isNullOrEmpty(connectionString)) {
throw new IllegalStateException("Environment variable STORAGE_CONNECTION_STRING must be set");
}
BlobServiceClientBuilder builder = new BlobServiceClientBuilder()
.connectionString(connectionString)
.httpClient(PerfStressHttpClient.create(options));
blobServiceClient = builder.buildClient();
blobServiceAsyncClient = builder.buildAsyncClient();
} | String connectionString = configuration.get("STORAGE_CONNECTION_STRING"); | public ServiceTest(TOptions options) {
super(options);
configuration = Configuration.getGlobalConfiguration().clone();
String connectionString = configuration.get("STORAGE_CONNECTION_STRING");
if (CoreUtils.isNullOrEmpty(connectionString)) {
throw new IllegalStateException("Environment variable STORAGE_CONNECTION_STRING must be set");
}
BlobServiceClientBuilder builder = new BlobServiceClientBuilder()
.connectionString(connectionString);
blobServiceClient = builder.buildClient();
blobServiceAsyncClient = builder.buildAsyncClient();
} | class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> {
protected final BlobServiceClient blobServiceClient;
protected final BlobServiceAsyncClient blobServiceAsyncClient;
private final Configuration configuration;
} | class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> {
protected final BlobServiceClient blobServiceClient;
protected final BlobServiceAsyncClient blobServiceAsyncClient;
private final Configuration configuration;
} |
```suggestion dataLakeFileClient.uploadFromFile(TEMP_FILE_PATH, /* overwrite */ true); ``` | public void run() {
dataLakeFileClient.uploadFromFile(TEMP_FILE_PATH, true);
} | dataLakeFileClient.uploadFromFile(TEMP_FILE_PATH, true); | public void run() {
dataLakeFileClient.uploadFromFile(TEMP_FILE_PATH, true);
} | class UploadFromFileDatalakeTest extends FileTestBase<PerfStressOptions> {
private static final Path TEMP_FILE;
private static final String TEMP_FILE_PATH;
static {
try {
TEMP_FILE = Files.createTempFile(null, null);
TEMP_FILE_PATH = TEMP_FILE.toString();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public UploadFromFileDatalakeTest(PerfStressOptions options) {
super(options);
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(createTempFile());
}
@Override
public Mono<Void> globalCleanupAsync() {
return deleteTempFile().then(super.globalCleanupAsync());
}
private Mono<Void> createTempFile() {
return Mono.fromCallable(() -> {
TestDataCreationHelper.writeToFile(TEMP_FILE_PATH, options.getSize(), DEFAULT_BUFFER_SIZE);
return 1;
}).then();
}
private Mono<Void> deleteTempFile() {
try {
Files.delete(TEMP_FILE);
return Mono.empty();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.uploadFromFile(TEMP_FILE_PATH, true);
}
} | class UploadFromFileDatalakeTest extends FileTestBase<PerfStressOptions> {
private static final Path TEMP_FILE;
private static final String TEMP_FILE_PATH;
static {
try {
TEMP_FILE = Files.createTempFile(null, null);
TEMP_FILE_PATH = TEMP_FILE.toString();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public UploadFromFileDatalakeTest(PerfStressOptions options) {
super(options);
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(createTempFile());
}
@Override
public Mono<Void> globalCleanupAsync() {
return deleteTempFile().then(super.globalCleanupAsync());
}
private Mono<Void> createTempFile() {
return Mono.fromCallable(() -> {
TestDataCreationHelper.writeToFile(TEMP_FILE_PATH, options.getSize(), DEFAULT_BUFFER_SIZE);
return 1;
}).then();
}
private Mono<Void> deleteTempFile() {
try {
Files.delete(TEMP_FILE);
return Mono.empty();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
@Override
public Mono<Void> runAsync() {
return dataLakeFileAsyncClient.uploadFromFile(TEMP_FILE_PATH, true);
}
} |
I think I've seen this same code repeated many times, so maybe move it to a helper method? | public Mono<Void> runAsync() {
return shareFileAsyncClient.download()
.map(b -> {
int readCount = 0;
int remaining = b.remaining();
while (readCount < remaining) {
int expectedReadCount = Math.min(remaining - readCount, BUFFER_SIZE);
b.get(buffer, 0, expectedReadCount);
readCount += expectedReadCount;
}
return 1;
}).then();
} | .map(b -> { | public Mono<Void> runAsync() {
return shareFileAsyncClient.download()
.map(b -> {
int readCount = 0;
int remaining = b.remaining();
while (readCount < remaining) {
int expectedReadCount = Math.min(remaining - readCount, BUFFER_SIZE);
b.get(buffer, 0, expectedReadCount);
readCount += expectedReadCount;
}
return 1;
}).then();
} | class DownloadFileShareTest extends DirectoryTest<PerfStressOptions> {
private static final int BUFFER_SIZE = 16 * 1024 * 1024;
private static final OutputStream DEV_NULL = new NullOutputStream();
private static final String FILE_NAME = "perfstress-filev11-" + UUID.randomUUID().toString();
protected final ShareFileClient shareFileClient;
protected final ShareFileAsyncClient shareFileAsyncClient;
private final byte[] buffer = new byte[BUFFER_SIZE];
public DownloadFileShareTest(PerfStressOptions options) {
super(options);
shareFileClient = shareDirectoryClient.getFileClient(FILE_NAME);
shareFileAsyncClient = shareDirectoryAsyncClient.getFileClient(FILE_NAME);
}
private static final Path TEMP_FILE;
static {
try {
TEMP_FILE = Files.createTempFile(null, null);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private Mono<Long> createTempFile() {
try (InputStream inputStream = createRandomInputStream(options.getSize());
OutputStream outputStream = new FileOutputStream(TEMP_FILE.toString())) {
return Mono.just(TestDataCreationHelper.copyStream(inputStream, outputStream, 8192));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(createTempFile())
.flatMap(dataSize -> shareFileAsyncClient.create(dataSize))
.then(shareFileAsyncClient.uploadFromFile(TEMP_FILE.toString()))
.then();
}
@Override
public void run() {
shareFileClient.download(DEV_NULL);
}
@Override
} | class DownloadFileShareTest extends DirectoryTest<PerfStressOptions> {
private static final int BUFFER_SIZE = 16 * 1024 * 1024;
private static final OutputStream DEV_NULL = new NullOutputStream();
private final Path tempFile;
protected final ShareFileClient shareFileClient;
protected final ShareFileAsyncClient shareFileAsyncClient;
private final byte[] buffer = new byte[BUFFER_SIZE];
public DownloadFileShareTest(PerfStressOptions options) {
super(options);
tempFile = setupFile();
String fileName = "perfstressdfilev11" + UUID.randomUUID().toString();
shareFileClient = shareDirectoryClient.getFileClient(fileName);
shareFileAsyncClient = shareDirectoryAsyncClient.getFileClient(fileName);
}
private Path setupFile() {
try {
return Files.createTempFile(null, null);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private Mono<Long> createTempFile() {
try (InputStream inputStream = createRandomInputStream(options.getSize());
OutputStream outputStream = new FileOutputStream(tempFile.toString())) {
return Mono.just(TestDataCreationHelper.copyStream(inputStream, outputStream, 8192));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public Mono<Void> setupAsync() {
return super.setupAsync()
.then(createTempFile())
.flatMap(dataSize -> shareFileAsyncClient.create(dataSize))
.then(shareFileAsyncClient.uploadFromFile(tempFile.toString()))
.then();
}
@Override
public void run() {
shareFileClient.download(DEV_NULL);
}
@Override
} |
Yes, I think an overload should be added with a boolean `overwrite` parameter. In the meantime, can you compare the perf of creating a new file every time, with using a single file but deleting the file after each download? I'm a little concerned about how much disk space this test could use if it keeps creating more and more files. | public void run() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
shareFileClient.downloadToFile(file.getAbsolutePath());
} | File file = new File(UUID.randomUUID().toString()); | public void run() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
shareFileClient.downloadToFile(file.getAbsolutePath());
} | class DownloadToFileShareTest extends DirectoryTest<PerfStressOptions> {
private static final String FILE_NAME = "perfstress-file-" + UUID.randomUUID().toString();
protected final ShareFileClient shareFileClient;
protected final ShareFileAsyncClient shareFileAsyncClient;
public DownloadToFileShareTest(PerfStressOptions options) {
super(options);
shareFileClient = shareDirectoryClient.getFileClient(FILE_NAME);
shareFileAsyncClient = shareDirectoryAsyncClient.getFileClient(FILE_NAME);
}
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(shareFileAsyncClient.create(options.getSize()))
.then(shareFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), options.getSize()))
.then();
}
@Override
@Override
public Mono<Void> runAsync() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
return shareFileAsyncClient.downloadToFile(file.getAbsolutePath()).then();
}
public Mono<Void> globalCleanupAsync() {
return shareFileAsyncClient.delete()
.then(super.globalCleanupAsync())
.then();
}
} | class DownloadToFileShareTest extends DirectoryTest<PerfStressOptions> {
protected final ShareFileClient shareFileClient;
protected final ShareFileAsyncClient shareFileAsyncClient;
public DownloadToFileShareTest(PerfStressOptions options) {
super(options);
String fileName = "perfstressdfile" + UUID.randomUUID().toString();
shareFileClient = shareDirectoryClient.getFileClient(fileName);
shareFileAsyncClient = shareDirectoryAsyncClient.getFileClient(fileName);
}
public Mono<Void> setupAsync() {
return super.setupAsync()
.then(shareFileAsyncClient.create(options.getSize()))
.then(shareFileAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), options.getSize()))
.then();
}
@Override
@Override
public Mono<Void> runAsync() {
File file = new File(UUID.randomUUID().toString());
file.deleteOnExit();
return shareFileAsyncClient.downloadToFile(file.getAbsolutePath()).then();
}
} |
```suggestion cloudBlockBlob.upload(inputStream, options.getSize()); ``` | public void run() {
try {
inputStream.reset();
cloudBlockBlob.upload(TestDataCreationHelper.createRandomInputStream(options.getSize()), options.getSize());
} catch (StorageException | IOException e) {
throw new RuntimeException(e);
}
} | cloudBlockBlob.upload(TestDataCreationHelper.createRandomInputStream(options.getSize()), options.getSize()); | public void run() {
try {
inputStream.reset();
cloudBlockBlob.upload(TestDataCreationHelper.createRandomInputStream(options.getSize()), options.getSize());
} catch (StorageException | IOException e) {
throw new RuntimeException(e);
}
} | class UploadBlobTest extends BlobTestBase<PerfStressOptions> {
protected final RepeatingInputStream inputStream;
public UploadBlobTest(PerfStressOptions options) {
super(options);
inputStream = (RepeatingInputStream) TestDataCreationHelper.createRandomInputStream(options.getSize());
}
@Override
@Override
public Mono<Void> runAsync() {
throw new UnsupportedOperationException();
}
} | class UploadBlobTest extends BlobTestBase<PerfStressOptions> {
protected final RepeatingInputStream inputStream;
public UploadBlobTest(PerfStressOptions options) {
super(options);
inputStream = (RepeatingInputStream) TestDataCreationHelper.createRandomInputStream(options.getSize());
}
@Override
@Override
public Mono<Void> runAsync() {
throw new UnsupportedOperationException();
}
} |
I would rather throw. I don't want the user to think they were locking on something when they really weren't. | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.E_TAG
: options.getConcurrencyControl();
BlobProperties properties = getProperties();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
case NONE:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateNoneException("requestConditions.ifMatch", "E_TAG"));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateNoneException("client.versionId", "VERSION_ID"));
}
break;
case E_TAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(
new UnsupportedOperationException("Concurrency control type not supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
private UnsupportedOperationException generateNoneException(String wrongValue, String toSet) {
return new UnsupportedOperationException(String.format("'%s' can not be set when 'concurrencyControl'"
+ " is set to NONE. Set 'concurrencyControl' to %s.", wrongValue, toSet));
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB and must be either public or carry a SAS token. The URL
 * must be URL encoded.
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options bag accepted by the canonical overload.
    BlobCopyFromUrlOptions options = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(options, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB and must be either public or carry a SAS token. The URL
 * must be URL encoded.
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    // Full-blob download with every optional parameter defaulted; the response is intentionally discarded.
    this.downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Drain the async body into the caller's stream, then surface the original response metadata.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue()
            .reduce(stream, (os, byteBuffer) -> {
                try {
                    os.write(FluxUtil.byteBufferToArray(byteBuffer));
                } catch (IOException ex) {
                    // Wrap the checked exception so it can propagate through the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
                return os;
            })
            .thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    // overwrite=false: an existing target file is an error.
    return this.downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // Allow replacing an existing file: create if absent, truncate on open, read/write access.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.WRITE);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    }
    Response<BlobProperties> response = downloadToFileWithResponse(filePath, null, null, null, null, false,
        openOptions, null, Context.NONE);
    return response.getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate to the overload that also accepts OpenOptions, passing null to keep the
    // default create-only (fail-if-exists) behavior.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Fill in transfer-option defaults before wrapping everything in the options bag.
    com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions options = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(options, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 */
public void delete() {
    // All optional parameters default; the response is intentionally discarded.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    // All optional parameters default; the response is intentionally discarded.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob.
 */
public void setMetadata(Map<String, String> metadata) {
    // All optional parameters default; the response is intentionally discarded.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    Response<Map<String, String>> response =
        this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's tags.
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    // All optional parameters default; the response is intentionally discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @return A {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public BlobClientBase createSnapshot() {
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @param metadata Metadata to associate with the blob snapshot.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a sync client before surfacing the response.
    Mono<Response<BlobClientBase>> response = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 */
public void setAccessTier(AccessTier tier) {
    // All optional parameters default; the response is intentionally discarded.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options bag accepted by the canonical overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 */
public void undelete() {
    // All optional parameters default; the response is intentionally discarded.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @return The sku name and account kind.
 */
public StorageAccountInfo getAccountInfo() {
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @see BlobServiceClient
 * @return A {@code String} representing all SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local; delegate directly to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
 * Note : The client must be authenticated via {@link StorageSharedKeyCredential}
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing all SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local; delegate directly to the async client's implementation.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
public InputStream openQueryInputStream(String expression) {
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block until the response headers arrive; the body is read lazily through the FluxInputStream.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    InputStream body = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), body, asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public void query(OutputStream stream, String expression) {
    // All optional parameters default; the response is intentionally discarded.
    this.queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Drain the async query result into the configured stream, then surface the response metadata.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue()
            .reduce(queryOptions.getOutputStream(), (os, byteBuffer) -> {
                try {
                    os.write(FluxUtil.byteBufferToArray(byteBuffer));
                } catch (IOException ex) {
                    // Wrap the checked exception so it can propagate through the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
                return os;
            })
            .thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
}
class BlobClientBase {
// Logger scoped to this class; used when surfacing exceptions from blocking operations.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every synchronous method delegates to it and blocks on the result.
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // The sync client is a thin blocking facade over this async client.
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the async snapshot-scoped client in a new sync facade.
    return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the async version-scoped client in a new sync facade.
    return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // Pure delegation; the URL is computed by the wrapped async client.
    return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Pure delegation to the wrapped async client.
    return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Pure delegation to the wrapped async client.
    return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Build a sync container client from the async client's preconfigured builder.
    return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Pure delegation; decoding is handled by the wrapped async client.
    return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Pure delegation to the wrapped async client.
    return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Pure delegation to the wrapped async client.
    return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
// Package-private: exposed for sibling classes in this package, not part of the public API.
String getEncryptionScope() {
    return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Pure delegation to the wrapped async client.
    return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Pure delegation to the wrapped async client.
    return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Pure delegation to the wrapped async client.
    return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // Pure delegation to the wrapped async client.
    return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // Convenience overload: full blob, no access conditions.
    return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Adapt the legacy parameter pair onto the options-bag overload.
    return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
// NOTE(review): this javadoc documents openInputStream(BlobInputStreamOptions) but no implementation
// follows it — confirm the method body was not accidentally dropped from this file.
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
    // Convenience overload: no timeout, default context.
    return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Defer to the full overload with every optional argument left unset.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the legacy parameter list into the options bag and defer to the canonical overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        // Generic RequestConditions are widened to blob-specific source conditions here.
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Reuse the async poller and expose its synchronous adapter.
    return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
    // Convenience overload: no lease, no timeout, default context.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Block on the async abort call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
    // Convenience overload: all optional arguments unset, default context.
    return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Adapt the legacy parameter list onto the options bag and defer to the canonical overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async synchronous-copy call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
    // Convenience overload: full blob, no retry options, no conditions, no MD5, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Stream every emitted ByteBuffer into the caller's OutputStream, then surface the
    // HTTP status/headers once the body has been fully written.
    Mono<BlobDownloadResponse> operation = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(asyncResponse -> asyncResponse.getValue()
            .reduce(stream, (target, byteBuffer) -> {
                try {
                    target.write(FluxUtil.byteBufferToArray(byteBuffer));
                    return target;
                } catch (IOException ioe) {
                    // Surface write failures as unchecked so they propagate through the reactive chain.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ioe)));
                }
            })
            .thenReturn(new BlobDownloadResponse(asyncResponse)));
    return blockWithOptionalTimeout(operation, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
    // Convenience overload: never overwrite an existing file.
    return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // null openOptions means "create new file, fail if it exists" downstream.
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // Truncate-and-write semantics: create the file if absent, wipe any existing contents.
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        openOptions = overwriteOptions;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Defers to the openOptions-aware overload with default (fail-if-exists) open options.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options (apply defaults, wrap in the common type), then funnel
    // everything through the options-bag overload.
    final com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async file download, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
public void delete() {
    // Convenience overload: no snapshot option, no conditions, no timeout, default context.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async delete, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
    // Convenience overload: no conditions, no timeout, default context.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Block on the async properties fetch, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Convenience overload: no conditions, no timeout, default context.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async header update, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
public void setMetadata(Map<String, String> metadata) {
    // Convenience overload: no conditions, no timeout, default context.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async metadata update, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
    // Convenience overload: default options, no timeout, default context.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey, String accountName, Context context) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
} |
If the application happens to fail to be created, it will cause an NPE when loading the authorized client. Maybe we should check whether the client is null in the loadAuthorizedClient method. | private ConfidentialClientApplication createApp(ClientRegistration clientRegistration) {
String authorizationUri = clientRegistration.getProviderDetails().getAuthorizationUri();
String authority = interceptAuthorizationUri(authorizationUri);
IClientSecret clientCredential = ClientCredentialFactory.createFromSecret(clientRegistration.getClientSecret());
try {
return ConfidentialClientApplication.builder(clientRegistration.getClientId(), clientCredential)
.authority(authority)
.build();
} catch (MalformedURLException e) {
LOGGER.error("Failed to create ConfidentialClientApplication", e);
}
return null;
} | return null; | private ConfidentialClientApplication createApp(ClientRegistration clientRegistration) {
String authorizationUri = clientRegistration.getProviderDetails().getAuthorizationUri();
String authority = interceptAuthorizationUri(authorizationUri);
IClientSecret clientCredential = ClientCredentialFactory
.createFromSecret(clientRegistration.getClientSecret());
try {
return ConfidentialClientApplication.builder(clientRegistration.getClientId(), clientCredential)
.authority(authority)
.build();
} catch (MalformedURLException e) {
LOGGER.error("Failed to create ConfidentialClientApplication", e);
}
return null;
} | class AADOAuth2OboAuthorizedClientRepository implements OAuth2AuthorizedClientRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(AADOAuth2OboAuthorizedClientRepository.class);
private static final String OBO_AUTHORIZEDCLIENT_PREFIX = "obo_authorizedclient_";
private final AzureClientRegistrationRepository azureClientRegistrationRepository;
private final Map<String, ConfidentialClientApplication> confidentialClientApplicationMap = new HashMap<>();
public AADOAuth2OboAuthorizedClientRepository(AzureClientRegistrationRepository azureClientRegistrationRepository) {
this.azureClientRegistrationRepository = azureClientRegistrationRepository;
Iterator<ClientRegistration> iterator = azureClientRegistrationRepository.iterator();
while (iterator.hasNext()) {
ClientRegistration next = iterator.next();
this.confidentialClientApplicationMap.put(next.getRegistrationId(), createApp(next));
}
}
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public <T extends OAuth2AuthorizedClient> T loadAuthorizedClient(String registrationId,
Authentication authentication,
HttpServletRequest request) {
try {
String oboAuthorizedClientAttributeName = OBO_AUTHORIZEDCLIENT_PREFIX + registrationId;
if (request.getAttribute(oboAuthorizedClientAttributeName) != null) {
return (T) request.getAttribute(oboAuthorizedClientAttributeName);
}
if (!(authentication instanceof AbstractOAuth2TokenAuthenticationToken)) {
throw new IllegalStateException("Not support token implementation");
}
AbstractOAuth2TokenAuthenticationToken<AbstractOAuth2Token> authenticationToken =
(AbstractOAuth2TokenAuthenticationToken) authentication;
ClientRegistration clientRegistration =
azureClientRegistrationRepository.findByRegistrationId(registrationId);
String accessToken = authenticationToken.getToken().getTokenValue();
OnBehalfOfParameters parameters = OnBehalfOfParameters
.builder(clientRegistration.getScopes(), new UserAssertion(accessToken))
.build();
ConfidentialClientApplication clientApplication =
getClientApplication(clientRegistration.getRegistrationId());
String oboAccessToken = clientApplication.acquireToken(parameters).get().accessToken();
JWT parser = JWTParser.parse(oboAccessToken);
Date iat = (Date) parser.getJWTClaimsSet().getClaim("iat");
Date exp = (Date) parser.getJWTClaimsSet().getClaim("exp");
OAuth2AccessToken oAuth2AccessToken = new OAuth2AccessToken(OAuth2AccessToken.TokenType.BEARER,
oboAccessToken,
Instant.ofEpochMilli(iat.getTime()),
Instant.ofEpochMilli(exp.getTime()));
OAuth2AuthorizedClient oAuth2AuthorizedClient = new OAuth2AuthorizedClient(clientRegistration,
authenticationToken.getName(), oAuth2AccessToken);
request.setAttribute(oboAuthorizedClientAttributeName, (T) oAuth2AuthorizedClient);
return (T) oAuth2AuthorizedClient;
} catch (Throwable throwable) {
LOGGER.error("Failed to loadAuthorizedClient", throwable);
}
return null;
}
@Override
public void saveAuthorizedClient(OAuth2AuthorizedClient oAuth2AuthorizedClient, Authentication authentication,
HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse) {
}
@Override
public void removeAuthorizedClient(String s, Authentication authentication,
HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse) {
}
ConfidentialClientApplication getClientApplication(String registrationId) {
return confidentialClientApplicationMap.get(registrationId);
}
private String interceptAuthorizationUri(String authorizationUri) {
int count = 0;
int slashNumber = 4;
for (int i = 0; i < authorizationUri.length(); i++) {
if (authorizationUri.charAt(i) == '/') {
count++;
}
if (count == slashNumber) {
return authorizationUri.substring(0, i + 1);
}
}
return null;
}
} | class AADOAuth2OboAuthorizedClientRepository implements OAuth2AuthorizedClientRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(AADOAuth2OboAuthorizedClientRepository.class);
private static final String OBO_AUTHORIZEDCLIENT_PREFIX = "obo_authorizedclient_";
private final AzureClientRegistrationRepository azureClientRegistrationRepository;
private final Map<String, ConfidentialClientApplication> confidentialClientApplicationMap = new HashMap<>();
public AADOAuth2OboAuthorizedClientRepository(AzureClientRegistrationRepository azureClientRegistrationRepository) {
this.azureClientRegistrationRepository = azureClientRegistrationRepository;
Iterator<ClientRegistration> iterator = azureClientRegistrationRepository.iterator();
while (iterator.hasNext()) {
ClientRegistration next = iterator.next();
this.confidentialClientApplicationMap.put(next.getRegistrationId(), createApp(next));
}
}
@Override
@SuppressWarnings({ "unchecked", "rawtypes" })
public <T extends OAuth2AuthorizedClient> T loadAuthorizedClient(String registrationId,
Authentication authentication,
HttpServletRequest request) {
String oboAuthorizedClientAttributeName = OBO_AUTHORIZEDCLIENT_PREFIX + registrationId;
if (request.getAttribute(oboAuthorizedClientAttributeName) != null) {
return (T) request.getAttribute(oboAuthorizedClientAttributeName);
}
if (!(authentication instanceof AbstractOAuth2TokenAuthenticationToken)) {
throw new IllegalStateException("Unsupported token implementation " + authentication.getClass());
}
try {
String accessToken =
((AbstractOAuth2TokenAuthenticationToken<?>) authentication).getToken().getTokenValue();
ClientRegistration clientRegistration =
azureClientRegistrationRepository.findByRegistrationId(registrationId);
if (clientRegistration == null) {
LOGGER.warn("Not found the ClientRegistration.");
return null;
}
OnBehalfOfParameters parameters = OnBehalfOfParameters
.builder(clientRegistration.getScopes(), new UserAssertion(accessToken))
.build();
ConfidentialClientApplication clientApplication =
getClientApplication(clientRegistration.getRegistrationId());
if (null == clientApplication) {
LOGGER.warn("Not found the " + clientRegistration.getRegistrationId()
+ " ConfidentialClientApplication.");
return null;
}
String oboAccessToken = clientApplication.acquireToken(parameters).get().accessToken();
JWT parser = JWTParser.parse(oboAccessToken);
Date iat = (Date) parser.getJWTClaimsSet().getClaim("iat");
Date exp = (Date) parser.getJWTClaimsSet().getClaim("exp");
OAuth2AccessToken oAuth2AccessToken = new OAuth2AccessToken(OAuth2AccessToken.TokenType.BEARER,
oboAccessToken,
Instant.ofEpochMilli(iat.getTime()),
Instant.ofEpochMilli(exp.getTime()));
OAuth2AuthorizedClient oAuth2AuthorizedClient = new OAuth2AuthorizedClient(clientRegistration,
authentication.getName(), oAuth2AccessToken);
request.setAttribute(oboAuthorizedClientAttributeName, (T) oAuth2AuthorizedClient);
return (T) oAuth2AuthorizedClient;
} catch (Throwable throwable) {
LOGGER.error("Failed to load authorized client.", throwable);
}
return null;
}
@Override
public void saveAuthorizedClient(OAuth2AuthorizedClient oAuth2AuthorizedClient, Authentication authentication,
HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse) {
}
@Override
public void removeAuthorizedClient(String clientRegistrationId, Authentication authentication,
HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse) {
}
ConfidentialClientApplication getClientApplication(String registrationId) {
return confidentialClientApplicationMap.get(registrationId);
}
private String interceptAuthorizationUri(String authorizationUri) {
int count = 0;
int slashNumber = 4;
for (int i = 0; i < authorizationUri.length(); i++) {
if (authorizationUri.charAt(i) == '/') {
count++;
}
if (count == slashNumber) {
return authorizationUri.substring(0, i + 1);
}
}
return null;
}
} |
This method can just have `return EventGridDeserializer.getData(event.getData());` | public BinaryData getData() {
if (event.getData() != null) {
return EventGridDeserializer.getData(event.getData());
}
return null;
} | return EventGridDeserializer.getData(event.getData()); | public BinaryData getData() {
return EventGridDeserializer.getData(event.getData());
} | class EventGridEvent {
private final com.azure.messaging.eventgrid.implementation.models.EventGridEvent event;
private static final ClientLogger logger = new ClientLogger(EventGridEvent.class);
/**
* Create a new instance of the EventGridEvent, with the given required fields.
* @param subject the subject of the event.
* @param eventType the type of the event, e.g. "Contoso.Items.ItemReceived".
* @param data the data associated with this event.
* @param dataVersion the version of the data sent along with the event.
*/
public EventGridEvent(String subject, String eventType, Object data, String dataVersion) {
if (CoreUtils.isNullOrEmpty(subject)) {
throw logger.logExceptionAsError(new IllegalArgumentException("subject cannot be null or empty"));
} else if (CoreUtils.isNullOrEmpty(eventType)) {
throw logger.logExceptionAsError(new IllegalArgumentException("event type cannot be null or empty"));
} else if (CoreUtils.isNullOrEmpty(dataVersion)) {
throw logger.logExceptionAsError(new IllegalArgumentException("data version cannot be null or empty"));
}
this.event = new com.azure.messaging.eventgrid.implementation.models.EventGridEvent()
.setEventTime(OffsetDateTime.now())
.setId(UUID.randomUUID().toString())
.setSubject(subject)
.setEventType(eventType)
.setData(data)
.setDataVersion(dataVersion);
}
/**
* Deserialize the {@link EventGridEvent} from a JSON string.
* @param eventGridJsonString the JSON payload containing one or more events.
*
* @return all of the events in the payload deserialized as {@link EventGridEvent}s.
* @throws IllegalArgumentException if the input parameter isn't a JSON string for a eventgrid event
* or an array of it.
*/
public static List<EventGridEvent> fromString(String eventGridJsonString) {
return EventGridDeserializer.deserializeEventGridEvents(eventGridJsonString);
}
/**
* Get the unique id associated with this event.
* @return the id.
*/
public String getId() {
return this.event.getId();
}
/**
* Set the unique id of the event. Note that a random id has already been set by default.
* @param id the unique id to set.
*
* @return the event itself.
*/
public EventGridEvent setId(String id) {
if (CoreUtils.isNullOrEmpty(id)) {
throw logger.logExceptionAsError(new IllegalArgumentException("id cannot be null or empty"));
}
this.event.setId(id);
return this;
}
/**
* Get the topic associated with this event if it is associated with a domain.
* @return the topic, or null if the topic is not set (i.e. the event came from or is going to a domain).
*/
public String getTopic() {
return this.event.getTopic();
}
/**
* Set the topic associated with this event. Used to route events from domain endpoints.
* @param topic the topic to set.
*
* @return the event itself.
*/
public EventGridEvent setTopic(String topic) {
this.event.setTopic(topic);
return this;
}
/**
* Get the subject associated with this event.
* @return the subject.
*/
public String getSubject() {
return this.event.getSubject();
}
/**
* Get whether this event is a system event.
* @see SystemEventNames
* @return {@code true} if the even is a system event, or {@code false} otherwise.
*/
public boolean isSystemEvent() {
String eventType = this.getEventType();
return SystemEventNames.getSystemEventMappings().containsKey(eventType);
}
/**
* Convert the event's data into the system event data if the event is a system event.
* @see SystemEventNames
* @return The system event if the event is a system event, or {@code null} if it's not.
*/
public Object asSystemEventData() {
if (event.getData() == null) {
return null;
}
return EventGridDeserializer.getSystemEventData(this.getData(), event.getEventType());
}
/**
* Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into
* a String, an Object, or a byte[].
* @return A {@link BinaryData} that wraps the this event's data payload.
*/
/**
* Get the type of this event.
* @return the event type.
*/
public String getEventType() {
return this.event.getEventType();
}
/**
* Get the time associated with the occurrence of this event.
* @return the event time.
*/
public OffsetDateTime getEventTime() {
return this.event.getEventTime();
}
/**
* Set the time associated with the event. Note that a default time has already been set when the event was
* constructed.
* @param time the time to set.
*
* @return the event itself.
*/
public EventGridEvent setEventTime(OffsetDateTime time) {
this.event.setEventTime(time);
return this;
}
/**
* Get the version of the data in the event. This can be used to specify versioning of event data schemas over time.
* @return the version of the event data.
*/
public String getDataVersion() {
return this.event.getDataVersion();
}
EventGridEvent(com.azure.messaging.eventgrid.implementation.models.EventGridEvent impl) {
this.event = impl;
}
com.azure.messaging.eventgrid.implementation.models.EventGridEvent toImpl() {
return this.event;
}
} | class EventGridEvent {
private final com.azure.messaging.eventgrid.implementation.models.EventGridEvent event;
private static final ClientLogger logger = new ClientLogger(EventGridEvent.class);
/**
* Create a new instance of the EventGridEvent, with the given required fields.
* @param subject the subject of the event.
* @param eventType the type of the event, e.g. "Contoso.Items.ItemReceived".
* @param data the data associated with this event.
* @param dataVersion the version of the data sent along with the event.
*
* @throws IllegalArgumentException if subject, eventType or data is {@code null} or empty.
*/
public EventGridEvent(String subject, String eventType, Object data, String dataVersion) {
if (CoreUtils.isNullOrEmpty(subject)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'subject' cannot be null or empty."));
} else if (CoreUtils.isNullOrEmpty(eventType)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'eventType' cannot be null or empty."));
} else if (CoreUtils.isNullOrEmpty(dataVersion)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'dataVersion' cannot be null or empty."));
}
this.event = new com.azure.messaging.eventgrid.implementation.models.EventGridEvent()
.setEventTime(OffsetDateTime.now())
.setId(UUID.randomUUID().toString())
.setSubject(subject)
.setEventType(eventType)
.setData(data)
.setDataVersion(dataVersion);
}
/**
 * Deserialize one or more {@link EventGridEvent EventGridEvents} from a JSON string.
 * Delegates all parsing to {@code EventGridDeserializer}.
 * @param eventGridJsonString the JSON payload containing one or more events.
 *
 * @return all of the events in the payload deserialized as {@link EventGridEvent EventGridEvents}.
 * @throws IllegalArgumentException if eventGridJsonString isn't a JSON string for an EventGrid event
 * or an array of them.
 * @throws NullPointerException if eventGridJsonString is {@code null}.
 */
public static List<EventGridEvent> fromString(String eventGridJsonString) {
return EventGridDeserializer.deserializeEventGridEvents(eventGridJsonString);
}
/**
 * Get the unique id associated with this event.
 * @return the id.
 */
public String getId() {
return this.event.getId();
}
/**
 * Set the unique id of the event. Note that a random id has already been set by default.
 * @param id the unique id to set.
 *
 * @return the event itself.
 * @throws IllegalArgumentException if id is {@code null} or empty.
 */
public EventGridEvent setId(String id) {
if (CoreUtils.isNullOrEmpty(id)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'id' cannot be null or empty."));
}
this.event.setId(id);
return this;
}
/**
 * Get the topic associated with this event if it is associated with a domain.
 * @return the topic, or null if the topic is not set (i.e. the event came from or is going to a domain).
 */
public String getTopic() {
return this.event.getTopic();
}
/**
 * Set the topic associated with this event. Used to route events from domain endpoints.
 * @param topic the topic to set.
 *
 * @return the event itself.
 */
public EventGridEvent setTopic(String topic) {
this.event.setTopic(topic);
return this;
}
/**
 * Get the subject associated with this event.
 * @return the subject.
 */
public String getSubject() {
return this.event.getSubject();
}
/**
 * Get the type of this event, e.g. "Contoso.Items.ItemReceived".
 * @return the event type.
 */
public String getEventType() {
    return this.event.getEventType();
}
/**
 * Get the time associated with the occurrence of this event.
 * @return the event time.
 */
public OffsetDateTime getEventTime() {
return this.event.getEventTime();
}
/**
 * Set the time associated with the event. Note that a default time has already been set when the event was
 * constructed.
 * @param time the time to set.
 *
 * @return the event itself.
 */
public EventGridEvent setEventTime(OffsetDateTime time) {
this.event.setEventTime(time);
return this;
}
/**
 * Get the version of the data in the event. This can be used to specify versioning of event data schemas over time.
 * @return the version of the event data.
 */
public String getDataVersion() {
return this.event.getDataVersion();
}
// Package-private: wraps an already-built implementation-model event (used when deserializing).
EventGridEvent(com.azure.messaging.eventgrid.implementation.models.EventGridEvent impl) {
this.event = impl;
}
// Package-private: exposes the underlying implementation model backing this event.
com.azure.messaging.eventgrid.implementation.models.EventGridEvent toImpl() {
return this.event;
}
} |
Do we want to log an informational or warning message here instead of throwing? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.E_TAG
    : options.getConcurrencyControl();
// Fetch the blob's current properties under the caller's request conditions (lease id, ifMatch, ...)
// so the eTag/versionId snapshot that pins this stream is taken with the same access checks the
// subsequent range downloads will use. Previously the caller's conditions were ignored here, so the
// pin could be taken from a state the caller is not permitted to (or did not intend to) read.
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
    ? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
    case NONE:
        // NONE means "no read consistency": reject settings that only make sense with pinning.
        if (requestConditions.getIfMatch() != null) {
            throw logger.logExceptionAsError(generateNoneException("requestConditions.ifMatch", "E_TAG"));
        }
        if (this.client.getVersionId() != null) {
            throw logger.logExceptionAsError(generateNoneException("client.versionId", "VERSION_ID"));
        }
        break;
    case E_TAG:
        // Pin reads to the eTag observed above unless the caller supplied their own ifMatch.
        if (requestConditions.getIfMatch() == null) {
            requestConditions.setIfMatch(eTag);
        }
        break;
    case VERSION_ID:
        if (versionId == null) {
            throw logger.logExceptionAsError(
                new UnsupportedOperationException("Versioning is not supported on this account."));
        } else {
            // Pin to the current version unless this client is already version-scoped.
            if (this.client.getVersionId() == null) {
                client = this.client.getVersionClient(versionId);
            }
        }
        break;
    default:
        // Unreachable for known ConcurrencyControl values.
        throw logger.logExceptionAsError(
            new UnsupportedOperationException("Concurrency control type not supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
    requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Normalize inputs to their defaults: ETAG read consistency, whole-blob range, 4 MB chunks.
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
// Properties are fetched under the caller's request conditions so the eTag/versionId snapshot
// below is taken with the same access checks as the downloads that follow.
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
// No consistency pinning: reads may observe concurrent modifications to the blob.
break;
case ETAG:
// Pin reads to the observed eTag unless the caller supplied an ifMatch of their own.
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
// Pin to the current version unless this client is already version-scoped.
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
// Unreachable for known ConsistentReadControl values.
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
// Logger scoped to this client instance.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Async client this sync client delegates every operation to, blocking on the result
// (see blockWithOptionalTimeout usages throughout this class).
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client
 */
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob.
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
return client.getAccountName();
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
return client.getContainerName();
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
return client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 * Package-private: only exposed to other clients within this package.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob.
 */
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob.
 */
public String getVersionId() {
return client.getVersionId();
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot.
 */
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob. Convenience overload covering the whole blob
 * with default request conditions.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
 * Builds the {@link UnsupportedOperationException} reported when a setting that only makes sense
 * with consistency pinning is supplied together with {@code ConcurrencyControl.NONE}.
 *
 * @param wrongValue name of the offending setting, embedded in the message.
 * @param toSet the concurrency-control mode the caller should switch to instead.
 * @return the exception to report; callers wrap it with {@code logger.logExceptionAsError} and throw.
 */
private UnsupportedOperationException generateNoneException(String wrongValue, String toSet) {
return new UnsupportedOperationException(String.format("'%s' can not be set when 'concurrencyControl'"
+ " is set to NONE. Set 'concurrencyControl' to %s.", wrongValue, toSet));
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't.
 */
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't.
 */
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
public void delete() {
deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete, then block with the optional timeout applied.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
    // Use the response-returning overload with default conditions and unwrap the value.
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch properties asynchronously and block for the result with the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
// Delegate with default request conditions and no timeout.
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Apply the headers asynchronously and block for completion with the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
*/
public void setMetadata(Map<String, String> metadata) {
// Delegate with default request conditions and no timeout.
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Replace the blob's metadata asynchronously and block with the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
    // Call the response-returning overload with default options and unwrap the value.
    Response<Map<String, String>> response = this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Fetch the tags asynchronously and block for the result with the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
// Wrap the tags in the options type and delegate with no timeout.
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Replace the blob's tags asynchronously and block with the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase}
*/
public BlobClientBase createSnapshot() {
    // Use the full overload with all options defaulted and unwrap the snapshot client.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob snapshot.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase}
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Re-wrap the async response so the returned value is a synchronous client for the new snapshot.
    Mono<Response<BlobClientBase>> response = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
// Delegate with no rehydrate priority, no lease id, and no timeout.
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options type and reuse the options-based overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Set the tier asynchronously, then block for the response with the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
// Delegate with no timeout.
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Restore the soft-deleted blob asynchronously and block with the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
    // Call the response-returning overload with no timeout and unwrap the value.
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Fetch account info asynchronously and block for the result with the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* @see BlobServiceClient
* user delegation key.
* @return A {@code String} representing all SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
// SAS generation is purely local signing work delegated to the async client.
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing all SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
// SAS generation is purely local signing work delegated to the async client.
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Wrap the expression in query options and unwrap the stream from the response.
    Response<InputStream> response = openQueryInputStreamWithResponse(new BlobQueryOptions(expression));
    return response.getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block for the async query response; a null response is a programming/state error.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the reactive body into a blocking InputStream for the caller.
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), new FluxInputStream(asyncResponse.getValue()),
        asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
// Wrap the expression and destination stream in options; no timeout.
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
// Both the options object and its output stream are required.
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
// Reduce the async body into the caller-supplied OutputStream, writing each buffer as it arrives.
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Surface I/O failures as unchecked so they propagate through the reactive pipeline.
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
} | class BlobClientBase {
// Logger used to record and rethrow errors raised by this client.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; every synchronous operation delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
// All synchronous operations are implemented by blocking on this async client.
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
// Wrap the async client's snapshot-scoped view in a new synchronous client.
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
// Wrap the async client's version-scoped view in a new synchronous client.
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
// Pure delegation to the underlying async client.
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
// Pure delegation to the underlying async client.
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
// Pure delegation to the underlying async client.
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
// Build a synchronous container client from the async client's container builder.
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
// Pure delegation to the underlying async client.
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
// Pure delegation to the underlying async client.
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
// Pure delegation to the underlying async client.
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
// Package-private: intentionally not part of the public surface.
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
// Pure delegation to the underlying async client.
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
// Pure delegation to the underlying async client.
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
// Pure delegation to the underlying async client.
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
// Pure delegation to the underlying async client.
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
// Delegate to the range overload with no range and no request conditions (whole blob).
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
// Wrap the arguments in BlobInputStreamOptions and forward to the options-based overload.
// NOTE(review): the options-based openInputStream overload's body is not visible in this file
// view — its Javadoc appears below with no declaration following it; confirm the overload exists.
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* <p>NOTE(review): this Javadoc has no method declaration following it — the
* {@code openInputStream(BlobInputStreamOptions)} overload appears to be missing here;
* restore the declaration or remove this comment.</p>
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
    // Call the response-returning overload with no timeout and unwrap the value.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Check existence asynchronously and block for the result with the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Forward to the full overload with every optional parameter left unset.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Assemble the individual parameters into the options type and reuse the options-based overload.
    BlobBeginCopyOptions options = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(options);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
// Obtain the async poller from the async client and expose its synchronous wrapper.
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
// Delegate with no lease id and no timeout.
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Abort the pending copy asynchronously, then block with the optional timeout.
    Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options bag and delegate to the options overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Start the async copy and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    // Full blob, no range/retry/conditions, no MD5 verification, no timeout.
    this.downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Fail fast with NullPointerException before any network work happens.
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body (a stream of ByteBuffers) into the caller's OutputStream:
    // each buffer is written sequentially, and the same stream instance is carried
    // through the reduction as the accumulator.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface I/O failures through the reactive pipeline as an unchecked exception.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    // Block the calling thread until the whole body has been written (or the timeout elapses).
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    // No-overwrite by default; an existing file triggers FileAlreadyExistsException downstream.
    return this.downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> overwriteOptions = null;
    if (overwrite) {
        // Replace any existing file contents; open the channel for both reading and writing.
        overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
    }
    // A null option set keeps the default create-new (fail-if-exists) behavior.
    return downloadToFileWithResponse(filePath, null, null, null, null, false, overwriteOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate to the overload that also accepts OpenOptions, passing none (default open behavior).
    return this.downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options (defaults applied) into the common-library representation.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    // Collect the individual parameters into the options bag and delegate.
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async file download and block for its completion, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the Azure Docs.</p>
 */
public void delete() {
    // No snapshot option, conditions, or timeout.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete and block until it finishes, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    // No request conditions or timeout; unwrap the response value.
    return this.getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch properties asynchronously and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    // No request conditions or timeout.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Apply the headers asynchronously, then block for completion with the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
public void setMetadata(Map<String, String> metadata) {
    // No request conditions or timeout.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Apply the metadata asynchronously, then block for completion with the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    // Use default options with no timeout; unwrap the response value.
    return getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Fetch tags asynchronously and block for the result with the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    // Wrap the tags in the options bag and delegate with no timeout.
    setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Apply the tags asynchronously, then block for completion with the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public BlobClientBase createSnapshot() {
    // No metadata, conditions, or timeout; unwrap the snapshot client from the response.
    return this.createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in its synchronous counterpart before blocking.
    Mono<Response<BlobClientBase>> response = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param tier The new tier for the blob.
 */
public void setAccessTier(AccessTier tier) {
    // No rehydrate priority, lease, or timeout.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Collect the individual parameters into the options bag and delegate.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Note: the async client names this operation setTierWithResponse.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
 *
 * <p>For more information, see the Azure Docs.</p>
 */
public void undelete() {
    // No timeout.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Issue the async undelete and block until done, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return The sku name and account kind.
 */
public StorageAccountInfo getAccountInfo() {
    // No timeout; unwrap the response value.
    return this.getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Fetch account info asynchronously and block for the result with the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 *
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Same as the single-argument overload, but threads the caller's context through.
    final String sasQueryParameters = this.client.generateSas(blobServiceSasSignatureValues, context);
    return sasQueryParameters;
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Wrap the expression in default query options and unwrap the stream from the response.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block on the async query; a null response indicates an unexpected client state.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the reactive body as a blocking InputStream while preserving response metadata.
    InputStream bodyStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), bodyStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Route through the options-based overload with no timeout and an empty context.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Validation messages name the actual parameters being checked ("options" previously
    // mislabeled the queryOptions parameter in the thrown error message).
    StorageImplUtils.assertNotNull("queryOptions", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        // Reduce the reactive body into the caller-supplied stream; I/O failures surface as
        // UncheckedIOException propagated through the pipeline so the blocking caller sees them.
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    // Block here, honoring the optional caller-provided timeout.
    return blockWithOptionalTimeout(download, timeout);
}
} |
Should the error types being thrown here be IllegalStateException instead of UnsupportedOperationException? They feel more like bad states and not like operations that can't be performed. | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Body of openInputStream(BlobInputStreamOptions): normalizes options, snapshots the blob's
// properties, then pins reads to an eTag or version according to the concurrency control mode.
options = options == null ? new BlobInputStreamOptions() : options;
// E_TAG is the default concurrency control when none is specified.
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.E_TAG
: options.getConcurrencyControl();
BlobProperties properties = getProperties();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
// Defaults: whole blob from offset 0, 4 MB chunks, empty request conditions.
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
    case NONE:
        // NONE is incompatible with an explicit ifMatch or a version-pinned client.
        if (requestConditions.getIfMatch() != null) {
            throw logger.logExceptionAsError(generateNoneException("requestConditions.ifMatch", "E_TAG"));
        }
        if (this.client.getVersionId() != null) {
            throw logger.logExceptionAsError(generateNoneException("client.versionId", "VERSION_ID"));
        }
        break;
    case E_TAG:
        // Pin reads to the eTag observed above unless the caller supplied their own.
        if (requestConditions.getIfMatch() == null) {
            requestConditions.setIfMatch(eTag);
        }
        break;
    case VERSION_ID:
        if (versionId == null) {
            // No versionId on the properties means versioning is disabled for the account.
            throw logger.logExceptionAsError(
                new UnsupportedOperationException("Versioning is not supported on this account."));
        } else {
            // Pin reads to the observed version unless the client is already version-scoped.
            if (this.client.getVersionId() == null) {
                client = this.client.getVersionClient(versionId);
            }
        }
        break;
    default:
        // Unreachable unless a new enum constant is added without a matching case.
        throw logger.logExceptionAsError(
            new UnsupportedOperationException("Concurrency control type not supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
    requestConditions, properties);
} | new UnsupportedOperationException("Concurrency control type not supported.")); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Body of openInputStream(BlobInputStreamOptions), consistent-read-control variant: fetches
// properties under the caller's request conditions, then pins reads per the chosen control.
options = options == null ? new BlobInputStreamOptions() : options;
// ETAG is the default consistent read control when none is specified.
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
// Properties are fetched under the caller's request conditions so the snapshot is consistent.
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
// Defaults: whole blob from offset 0, 4 MB chunks, empty request conditions.
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
    case NONE:
        // No consistency pinning requested; read whatever the service serves per chunk.
        break;
    case ETAG:
        // Pin reads to the eTag observed above unless the caller supplied their own.
        if (requestConditions.getIfMatch() == null) {
            requestConditions.setIfMatch(eTag);
        }
        break;
    case VERSION_ID:
        if (versionId == null) {
            // No versionId on the properties means versioning is disabled for the account.
            throw logger.logExceptionAsError(
                new UnsupportedOperationException("Versioning is not supported on this account."));
        } else {
            // Pin reads to the observed version unless the client is already version-scoped.
            if (this.client.getVersionId() == null) {
                client = this.client.getVersionClient(versionId);
            }
        }
        break;
    default:
        // Unreachable unless a new enum constant is added without a matching case.
        throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
            + "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
    requestConditions, properties);
} | class BlobClientBase {
// Logger scoped to this class; exceptions are routed through logExceptionAsError before throwing.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Wrapped async client; every synchronous operation delegates to it and blocks on the result.
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // All sync operations are thin blocking wrappers over this async client.
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the snapshot-scoped async client in a new synchronous facade.
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the version-scoped async client in a new synchronous facade.
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // The URL is owned by the wrapped async client.
    final String blobUrl = client.getBlobUrl();
    return blobUrl;
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Account name comes from the wrapped async client.
    final String accountName = client.getAccountName();
    return accountName;
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Container name comes from the wrapped async client.
    final String containerName = client.getContainerName();
    return containerName;
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Build a container-level sync client from the async client's pre-configured builder.
    return client
        .getContainerClientBuilder()
        .buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Decoded blob name comes from the wrapped async client.
    final String blobName = client.getBlobName();
    return blobName;
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Both sync and async clients share the same pipeline instance.
    final HttpPipeline pipeline = client.getHttpPipeline();
    return pipeline;
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Customer-provided encryption key info is held by the wrapped async client.
    final CpkInfo customerProvidedKey = client.getCustomerProvidedKey();
    return customerProvidedKey;
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private accessor; scope is held by the wrapped async client.
    final String encryptionScope = client.getEncryptionScope();
    return encryptionScope;
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Service version is configured on the wrapped async client.
    final BlobServiceVersion serviceVersion = client.getServiceVersion();
    return serviceVersion;
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Snapshot id (if any) is held by the wrapped async client.
    final String snapshotId = client.getSnapshotId();
    return snapshotId;
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Version id (if any) is held by the wrapped async client.
    final String versionId = client.getVersionId();
    return versionId;
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // True when this client targets a snapshot rather than the base blob.
    final boolean snapshot = client.isSnapshot();
    return snapshot;
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // Stream the entire blob with the default range and no access conditions.
    return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Adapt the legacy parameter pair onto the options-based overload.
    BlobInputStreamOptions inputStreamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(inputStreamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
private UnsupportedOperationException generateNoneException(String offendingSetting, String requiredControl) {
    // Builds the error explaining which setting conflicts with NONE and which mode to switch to.
    String message = String.format("'%s' can not be set when 'concurrencyControl'"
        + " is set to NONE. Set 'concurrencyControl' to %s.", offendingSetting, requiredControl);
    return new UnsupportedOperationException(message);
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
    // No timeout, empty context; unwrap the boolean from the full response.
    Response<Boolean> existsResponse = existsWithResponse(null, Context.NONE);
    return existsResponse.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    Mono<Response<Boolean>> existsMono = client.existsWithResponse(context);
    return blockWithOptionalTimeout(existsMono, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy parameters default to null; only the poll interval is forwarded.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options bag consumed by the canonical overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Kick off the async long-running copy and expose it through a synchronous poller.
    return client.beginCopy(options)
        .getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
    // No lease id, timeout, or custom context for the simple overload.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Block on the async abort, honoring the optional timeout.
    Mono<Response<Void>> abortMono = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortMono, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
    // All optional parameters are defaulted; unwrap the copy id from the full response.
    Response<String> copyResponse =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return copyResponse.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Fold the flat parameters into the options bag and reuse the canonical overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async synchronous-copy call, honoring the optional timeout.
    Mono<Response<String>> copyMono = client.copyFromUrlWithResponse(options, context);
    return blockWithOptionalTimeout(copyMono, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
    // Full-blob download: no range, retry options, conditions, or MD5 verification.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // The destination stream is mandatory; fail before issuing any network call.
    StorageImplUtils.assertNotNull("stream", stream);
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        // Reduce the reactive body into the caller's stream; I/O failures surface as
        // UncheckedIOException propagated through the pipeline to the blocking caller.
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    // Block here, honoring the optional caller-provided timeout.
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: fail if the destination file already exists (overwrite = false).
    return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // When overwriting, open so the file is created if absent and truncated if present;
    // otherwise leave openOptions null so the default create-new (fail-if-exists) applies.
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        openOptions = overwriteOptions;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forward with null openOptions so the default create-new file semantics apply.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options (defaults applied) before folding everything into the options bag.
    final com.azure.storage.common.ParallelTransferOptions normalizedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(normalizedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
public void delete() {
deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
*/
public void setMetadata(Map<String, String> metadata) {
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob snapshot.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* @see BlobServiceClient
* user delegation key.
* @return A {@code String} representing all SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing all SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
    protected BlobClientBase(BlobAsyncClientBase client) {
        // Every synchronous operation on this class delegates to this async client and blocks.
        this.client = client;
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
    /**
     * Opens a blob input stream to download the specified range of the blob.
     *
     * @param options {@link BlobInputStreamOptions}
     * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    // NOTE(review): the openInputStream(BlobInputStreamOptions) implementation this Javadoc
    // describes is not present in this chunk — confirm it was not accidentally dropped.
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Kick off the async abort, then block (optionally bounded by the timeout) for its result.
    Mono<Response<Void>> abort = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abort, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
    // All optional parameters default to null; only the copy id from the response is surfaced.
    Response<String> response =
        this.copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the flat parameter list into the options bag and delegate to that overload.
    BlobCopyFromUrlOptions options = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(options, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Synchronously wait on the async copy call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
    // Full-blob download with default range, retry options, and conditions; response ignored.
    this.downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the downloaded buffer flux into the caller's stream, then surface the HTTP response.
    Mono<BlobDownloadResponse> downloadMono = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue()
            .reduce(stream, (os, byteBuffer) -> {
                try {
                    os.write(FluxUtil.byteBufferToArray(byteBuffer));
                    return os;
                } catch (IOException ex) {
                    // Rethrow as unchecked so the reactive pipeline terminates with the I/O failure.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            })
            .thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(downloadMono, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: fail if the destination file already exists.
    return this.downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // When overwriting, open the destination so it is created if absent and truncated if present;
    // otherwise pass null so the default create-new semantics apply downstream.
    Set<OpenOption> openOptions = overwrite ? overwriteOpenOptions() : null;
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}

// Open options that allow replacing an existing file's contents.
private static Set<OpenOption> overwriteOpenOptions() {
    Set<OpenOption> options = new HashSet<>();
    options.add(StandardOpenOption.CREATE);
    options.add(StandardOpenOption.TRUNCATE_EXISTING);
    options.add(StandardOpenOption.READ);
    options.add(StandardOpenOption.WRITE);
    return options;
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate to the overload that also accepts OpenOptions, leaving them unset.
    return this.downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options, then fold every argument into the options bag overload.
    com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return this.downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async file download, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
public void delete() {
    // Delete with default snapshot handling and no request conditions; response discarded.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async delete, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
    // Unconditional fetch; only the deserialized properties are surfaced.
    return this.getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Block on the async properties fetch, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Apply headers with no request conditions; response discarded.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async header update, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
public void setMetadata(Map<String, String> metadata) {
    // Replace metadata with no request conditions; response discarded.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async metadata update, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
    // Fetch with default options; only the tag map is surfaced.
    return getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Block on the async tags fetch, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
    // Replace all tags; response discarded.
    setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Block on the async tag update, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier for the snapshot.
*/
public BlobClientBase createSnapshot() {
    // Snapshot with no metadata or conditions; only the snapshot client is surfaced.
    return this.createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier for the snapshot.
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous BlobClientBase before handing it back.
    Mono<Response<BlobClientBase>> snapshotResponse = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotResponse, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
    // Default rehydrate priority and lease; response discarded.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Translate the flat parameter list into the options bag and delegate to that overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return this.setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Block on the async tier change, bounded by the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
    // Restore the soft-deleted blob; response discarded.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Block on the async undelete, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
    // Only the deserialized account info is surfaced.
    return this.getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Block on the async account-info fetch, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * See {@link BlobServiceClient#getUserDelegationKey(OffsetDateTime, OffsetDateTime)} for more information on
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * See {@link BlobServiceClient#getUserDelegationKey(OffsetDateTime, OffsetDateTime)} for more information on
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Delegate to the async client's implementation.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Delegate to the async client's implementation, forwarding the caller's context.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Wrap the expression in default options and surface only the stream value.
    BlobQueryOptions options = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(options).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block on the async query; a null response indicates an unexpected client state.
    BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
    if (response == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the reactive body as a synchronous InputStream.
    InputStream bodyStream = new FluxInputStream(response.getValue());
    return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
        bodyStream, response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Default timeout (none) and context; the response itself is discarded.
    BlobQueryOptions options = new BlobQueryOptions(expression, stream);
    queryWithResponse(options, null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Both the options bag and its output stream are mandatory.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the async result flux into the caller-supplied stream, writing each
    // buffer synchronously as it arrives.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap write failures as unchecked so they propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    // Block for completion, enforcing the optional timeout.
    return blockWithOptionalTimeout(download, timeout);
}
} |
I think we should be using the customer's requestConditions for this call and THEN locking on the resulting etag. | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.E_TAG
: options.getConcurrencyControl();
BlobProperties properties = getProperties();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
case NONE:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateNoneException("requestConditions.ifMatch", "E_TAG"));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateNoneException("client.versionId", "VERSION_ID"));
}
break;
case E_TAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(
new UnsupportedOperationException("Concurrency control type not supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | BlobProperties properties = getProperties(); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
// Logger used to record and wrap exceptions thrown by this client.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client that performs all service interaction.
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // Synchronous client is a thin blocking wrapper over the async client.
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the async snapshot-scoped client in a new synchronous client.
    return new BlobClientBase(this.client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the async version-scoped client in a new synchronous client.
    return new BlobClientBase(this.client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // The async client owns the endpoint information.
    return this.client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Delegates to the wrapped async client.
    return this.client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Delegates to the wrapped async client.
    return this.client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Build a synchronous container client from the async client's pre-configured builder.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Delegates to the wrapped async client.
    return this.client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Both sync and async clients share the same pipeline instance.
    return this.client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Delegates to the wrapped async client.
    return this.client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private: used internally; delegates to the wrapped async client.
    return this.client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Delegates to the wrapped async client.
    return this.client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Delegates to the wrapped async client.
    return this.client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Delegates to the wrapped async client.
    return this.client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // Delegates to the wrapped async client.
    return this.client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // Full-blob read: no range restriction and no access conditions.
    return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Translate the flat parameters into the options-bag overload.
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
// Builds the exception thrown when a caller supplies 'wrongValue' while the
// concurrency control mode is NONE; 'toSet' names the mode that would make it legal.
private UnsupportedOperationException generateNoneException(String wrongValue, String toSet) {
    return new UnsupportedOperationException(String.format("'%s' can not be set when 'concurrencyControl'"
        + " is set to NONE. Set 'concurrencyControl' to %s.", wrongValue, toSet));
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
    // No timeout; default context.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy parameters (metadata, tier, priority, conditions) use their defaults.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options-bag overload. The generic
    // source conditions are converted to blob-specific conditions by ModelHelper.
    BlobBeginCopyOptions options = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(options);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the poller; expose its synchronous view.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
    // No lease requirement, no timeout, default context; response is discarded.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Block on the async abort, honoring the optional timeout.
    Mono<Response<Void>> abort = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abort, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
    // Default metadata, tier, conditions, timeout, and context.
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the flat parameter list into the options-bag overload.
    BlobCopyFromUrlOptions options = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(options, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async copy, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
    // Full-blob download: no range, default retry options and conditions, no range MD5.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body flux into the caller-supplied stream, writing each
    // buffer synchronously as it arrives.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap write failures as unchecked so they propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    // Block for completion, enforcing the optional timeout.
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
    // By default do not overwrite an existing file.
    return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // Null openOptions means the default behavior: create a new file, failing if it exists.
    Set<OpenOption> overwriteOptions = null;
    if (overwrite) {
        // Replace an existing file: create if absent, truncate, then read/write.
        overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, overwriteOptions, null,
        Context.NONE).getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forward with null openOptions (default: create a new file, fail if it already exists).
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize blob-specific transfer options into the common type, filling in defaults first.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 */
public void delete() {
    // No snapshot options, conditions, or timeout; response discarded.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Start the async delete, then block until completion (bounded by the optional timeout).
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
    // Fetch with default conditions/timeout and unwrap the response value.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
    // No request conditions or timeout; response discarded.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    Mono<Response<Void>> response = client
        .setHttpHeadersWithResponse(headers, requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
*
* @param metadata Metadata to associate with the blob.
*/
public void setMetadata(Map<String, String> metadata) {
    // No request conditions or timeout; response discarded.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
    // Use default options and unwrap the tag map from the response.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
    // Wrap the tags in options; response discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
 *
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier for the snapshot.
 */
public BlobClientBase createSnapshot() {
    // Default metadata/conditions/timeout; unwrap the snapshot-scoped client.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob snapshot.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier for the snapshot.
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a sync BlobClientBase before blocking on the result.
    Mono<Response<BlobClientBase>> response = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
    // No priority, lease, or timeout; response discarded.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Adapt the individual parameters into the options-based overload.
    return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
        timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client's setTier and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
*/
public void undelete() {
    // No timeout; response discarded.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    Mono<Response<Void>> response = client.undeleteWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a></p>
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
    // No timeout; unwrap the response value.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @see BlobServiceClient#getUserDelegationKey(java.time.OffsetDateTime, java.time.OffsetDateTime) for how to get a
 * user delegation key.
 * @return A {@code String} representing all SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Signing is handled entirely by the wrapped async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing all SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Signing is handled entirely by the wrapped async client.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Query with default options and unwrap the stream from the response.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Blocking here is expected: this is the synchronous client surface.
    BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
    if (response == null) {
        // Defensive: block() returns null when the source completes empty.
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Re-wrap the async body flux as a blocking InputStream for the caller.
    return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
        new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Write the query result for the expression into the caller-supplied stream.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the response body flux into the caller's OutputStream; reduce keeps writes sequential,
    // and any IOException is rethrown unchecked (logged) so it propagates through the reactive pipeline.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
} | class BlobClientBase {
// Logger scoped to this class, used when rethrowing exceptions.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every synchronous operation delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the async snapshot-scoped client in a new sync wrapper.
    return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the async version-scoped client in a new sync wrapper.
    return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // All client state lives on the wrapped async client.
    return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Delegates to the wrapped async client.
    return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Delegates to the wrapped async client.
    return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Build a sync container client from the async client's container builder.
    return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Delegates to the wrapped async client.
    return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Delegates to the wrapped async client.
    return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Delegates to the wrapped async client.
    return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private: exposed only to siblings in this package.
    return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Delegates to the wrapped async client.
    return client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob.
 */
public String getSnapshotId() {
    return client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the blob.
 */
public String getVersionId() {
    return client.getVersionId();
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot.
 */
public boolean isSnapshot() {
    return client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // Full-blob read with no range or access conditions.
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Adapt the legacy parameters into the options-based overload.
    return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
    // No timeout; unwrap the boolean from the response.
    return existsWithResponse(null, Context.NONE).getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    Mono<Response<Boolean>> response = client.existsWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy parameters (metadata, tier, priority, conditions) default to null.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the generic source RequestConditions into blob-specific conditions, then
    // funnel everything through the options-based overload.
    return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
        .setRehydratePriority(priority).setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
    public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
        DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
        Duration timeout, Context context) {
        // Fail fast on a null destination stream before starting the download.
        StorageImplUtils.assertNotNull("stream", stream);
        Mono<BlobDownloadResponse> download = client
            .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
            // Drain the body flux into the caller's stream; reduce threads the same OutputStream
            // through every buffer so writes happen in order.
            .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Surface I/O failures as unchecked so they propagate through the reactive chain.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobDownloadResponse(response)));
        // Block until the whole body has been written (or the optional timeout elapses).
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
public void delete() {
deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
public void setMetadata(Map<String, String> metadata) {
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Block on the async client's account-info call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient#getUserDelegationKey(OffsetDateTime, OffsetDateTime)} for more information on
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // The async client performs the actual signing; this wrapper only forwards the call.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient#getUserDelegationKey(OffsetDateTime, OffsetDateTime)} for more information on
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Forward everything, including the explicit account name and SAS-generation context,
    // to the async client which performs the signing.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey,
        accountName, context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Signing is delegated to the async client, which holds the shared key credential.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Forward the SAS values plus the generation-time context to the async client.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Wrap the bare expression in default query options and unwrap the stream from the response.
    BlobQueryOptions options = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(options).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Issue the query through the async client and wait for the response headers to arrive.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the reactive body into a blocking InputStream while preserving the HTTP metadata.
    InputStream bodyStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), bodyStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Bundle the expression and destination stream into query options; the response is discarded.
    BlobQueryOptions options = new BlobQueryOptions(expression, stream);
    queryWithResponse(options, null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Fail fast on missing options or destination stream before any network I/O happens.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Issue the query on the async client, then drain the reactive body into the caller's
    // OutputStream. reduce() writes each emitted buffer sequentially, carrying the same
    // stream instance through the accumulator.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Checked IOException cannot escape the lambda; wrap it so it propagates
                // through the reactive pipeline as an UncheckedIOException.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    // Block for completion, raising if the optional timeout elapses first.
    return blockWithOptionalTimeout(download, timeout);
}
} |
I think we want to throw here if etag is specified and throw above if version is specified | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.E_TAG
: options.getConcurrencyControl();
BlobProperties properties = getProperties();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
case NONE:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateNoneException("requestConditions.ifMatch", "E_TAG"));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateNoneException("client.versionId", "VERSION_ID"));
}
break;
case E_TAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(
new UnsupportedOperationException("Concurrency control type not supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | client = this.client.getVersionClient(versionId); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
private UnsupportedOperationException generateNoneException(String wrongValue, String toSet) {
return new UnsupportedOperationException(String.format("'%s' can not be set when 'concurrencyControl'"
+ " is set to NONE. Set 'concurrencyControl' to %s.", wrongValue, toSet));
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
    public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
        ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
        BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
        // Delegate to the OpenOption-aware overload with no OpenOptions (service default: file must not exist).
        return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
            requestConditions, rangeGetContentMd5, null, timeout, context);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
    public void delete() {
        // Delegate to the full overload with default snapshot handling and no access conditions.
        deleteWithResponse(null, null, null, Context.NONE);
    }
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
    public BlobProperties getProperties() {
        // Delegate to the full overload with no access conditions or timeout.
        return getPropertiesWithResponse(null, null, Context.NONE).getValue();
    }
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
    public void setHttpHeaders(BlobHttpHeaders headers) {
        // Delegate to the full overload with no access conditions or timeout.
        setHttpHeadersWithResponse(headers, null, null, Context.NONE);
    }
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
*/
    public void setMetadata(Map<String, String> metadata) {
        // Delegate to the full overload with no access conditions or timeout.
        setMetadataWithResponse(metadata, null, null, Context.NONE);
    }
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
    public Map<String, String> getTags() {
        // Delegate to the full overload with default options and no timeout.
        return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
    }
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
    public void setTags(Map<String, String> tags) {
        // Delegate to the full overload with no timeout.
        this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
    }
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
    public BlobClientBase createSnapshot() {
        // Delegate to the full overload with no metadata, access conditions, or timeout.
        return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
    }
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob snapshot.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
    public void setAccessTier(AccessTier tier) {
        // Delegate to the full overload with no priority, lease, or timeout.
        setAccessTierWithResponse(tier, null, null, null, Context.NONE);
    }
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
    public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
        Duration timeout, Context context) {
        // Pack the individual arguments into the options bag accepted by the canonical overload.
        return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
            timeout, context);
    }
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
    public void undelete() {
        // Delegate to the full overload with no timeout.
        undeleteWithResponse(null, Context.NONE);
    }
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
    public StorageAccountInfo getAccountInfo() {
        // Delegate to the full overload with no timeout.
        return getAccountInfoWithResponse(null, Context.NONE).getValue();
    }
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* @see BlobServiceClient
* user delegation key.
* @return A {@code String} representing all SAS query parameters.
*/
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey) {
        // SAS generation is purely local (no service call); delegate to the async client's implementation.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
    }
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing all SAS query parameters.
*/
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
        // SAS generation is purely local (no service call); delegate to the async client's implementation.
        return this.client.generateSas(blobServiceSasSignatureValues);
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
    public InputStream openQueryInputStream(String expression) {
        // Delegate to the options-based overload with only the expression set.
        return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
    public void query(OutputStream stream, String expression) {
        // Delegate to the options-based overload with no timeout.
        queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
    }
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
} | class BlobClientBase {
    private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
    // Wrapped async client that performs every operation; sync methods block on its Monos.
    private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
    protected BlobClientBase(BlobAsyncClientBase client) {
        // All synchronous operations delegate to this async client and block on the result.
        this.client = client;
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
    public BlobClientBase getSnapshotClient(String snapshot) {
        // Wrap the async snapshot client in a new synchronous client.
        return new BlobClientBase(client.getSnapshotClient(snapshot));
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
    public BlobClientBase getVersionClient(String versionId) {
        // Wrap the async version client in a new synchronous client.
        return new BlobClientBase(client.getVersionClient(versionId));
    }
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
    public String getBlobUrl() {
        // Delegates to the wrapped async client, which owns the endpoint state.
        return client.getBlobUrl();
    }
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
    public String getAccountName() {
        // Delegates to the wrapped async client.
        return client.getAccountName();
    }
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
    public final String getContainerName() {
        // Delegates to the wrapped async client.
        return client.getContainerName();
    }
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
    public BlobContainerClient getContainerClient() {
        // Build a synchronous container client from the async client's container builder.
        return client.getContainerClientBuilder().buildClient();
    }
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
    public final String getBlobName() {
        // Delegates to the wrapped async client, which performs the decoding.
        return client.getBlobName();
    }
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
    public HttpPipeline getHttpPipeline() {
        // Delegates to the wrapped async client.
        return client.getHttpPipeline();
    }
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
    public CpkInfo getCustomerProvidedKey() {
        // Delegates to the wrapped async client.
        return client.getCustomerProvidedKey();
    }
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
    String getEncryptionScope() {
        // Package-private accessor; delegates to the wrapped async client.
        return client.getEncryptionScope();
    }
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
    public BlobServiceVersion getServiceVersion() {
        // Delegates to the wrapped async client.
        return client.getServiceVersion();
    }
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
    public String getSnapshotId() {
        // Delegates to the wrapped async client.
        return client.getSnapshotId();
    }
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
    public String getVersionId() {
        // Delegates to the wrapped async client.
        return client.getVersionId();
    }
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
    public boolean isSnapshot() {
        // Delegates to the wrapped async client.
        return client.isSnapshot();
    }
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
    public final BlobInputStream openInputStream() {
        // Delegate to the range/conditions overload with no range and no access conditions.
        return openInputStream(null, null);
    }
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
    public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
        // Pack the arguments into the options bag accepted by the options-based overload.
        return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
    }
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
    public Boolean exists() {
        // Delegate to the full overload with no timeout.
        return existsWithResponse(null, Context.NONE).getValue();
    }
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy settings are left unset; only the poll interval is forwarded.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Bundle the individual parameters into the options object consumed by the canonical overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Start the async copy poller and expose it through a blocking SyncPoller view.
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copyId The id of the copy operation to abort.
 */
public void abortCopyFromUrl(String copyId) {
    // No lease, timeout, or custom context; the response is intentionally discarded.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> abortMono = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortMono, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public String copyFromUrl(String copySource) {
    // All optional settings defaulted; unwrap the copy ID from the response.
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options object consumed by the canonical overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    // Full-blob download: no range, retry options, conditions, MD5, or timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Drain the async body into the caller's stream, then surface the original response metadata.
    Mono<BlobDownloadResponse> downloadMono = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue()
            .reduce(stream, (os, byteBuffer) -> {
                try {
                    os.write(FluxUtil.byteBufferToArray(byteBuffer));
                    return os;
                } catch (IOException ex) {
                    // Wrap the checked IOException so it can propagate through the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            })
            .thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(downloadMono, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: never overwrite an existing file.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> overwriteOptions = null;
    if (overwrite) {
        // Replace an existing file: create if absent, truncate otherwise, opened for read/write.
        overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
    }
    // A null option set keeps the default create-new (fail-if-exists) behavior.
    return downloadToFileWithResponse(filePath, null, null, null, null, false, overwriteOptions, null,
        Context.NONE).getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with null openOptions, preserving the default fail-if-exists file creation.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options (apply defaults) before wrapping them for the common layer.
    final com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the
 * <a href="https:
 */
public void delete() {
    // No snapshot option, conditions, or timeout; the response is intentionally discarded.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    // No request conditions or timeout; unwrap the value from the response.
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    // No conditions or timeout; the response is intentionally discarded.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
public void setMetadata(Map<String, String> metadata) {
    // No conditions or timeout; the response is intentionally discarded.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    // Use default options with no timeout; unwrap the tag map from the response.
    Response<Map<String, String>> response =
        this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    // Wrap the tags in the options type; the response is intentionally discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Block on the async client's result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase
 */
public BlobClientBase createSnapshot() {
    // No metadata, conditions, or timeout; unwrap the snapshot client from the response.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase
 */
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous BlobClientBase before blocking.
    Mono<Response<BlobClientBase>> snapshotMono = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotMono, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param tier The new tier for the blob.
 */
public void setAccessTier(AccessTier tier) {
    // No priority, lease, or timeout; the response is intentionally discarded.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options object consumed by the canonical overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Note the async client names this operation setTier rather than setAccessTier.
    Mono<Response<Void>> tierMono = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(tierMono, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
 *
 * <p>For more information, see the
 * <a href="https:
 */
public void undelete() {
    // No timeout; the response is intentionally discarded.
    undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block for at most the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return The sku name and account kind.
 */
public StorageAccountInfo getAccountInfo() {
    // No timeout, default context; only the value is surfaced to the caller.
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block for at most the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. See
 * {@code BlobServiceClient} for how to get a user delegation key.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Signing is performed entirely by the async client; no service call is made here.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. See
 * {@code BlobServiceClient} for how to get a user delegation key.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Signing is performed entirely by the async client; no service call is made here.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Signing is performed entirely by the async client; no service call is made here.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Signing is performed entirely by the async client; no service call is made here.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
public InputStream openQueryInputStream(String expression) {
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block until the async query responds; a null response indicates an unexpected pipeline failure.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the reactive body into a blocking InputStream for the synchronous caller.
    InputStream bodyStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), bodyStream, asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public void query(OutputStream stream, String expression) {
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}. Must be non-null and carry a non-null output
 * stream; both are validated below.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        // reduce() drains the reactive byte-buffer body in order, writing each chunk to the caller's stream.
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
} |
I think it's more of an IllegalArgumentException. I can change it to that. | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.E_TAG
: options.getConcurrencyControl();
BlobProperties properties = getProperties();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
case NONE:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateNoneException("requestConditions.ifMatch", "E_TAG"));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateNoneException("client.versionId", "VERSION_ID"));
}
break;
case E_TAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(
new UnsupportedOperationException("Concurrency control type not supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | new UnsupportedOperationException("Concurrency control type not supported.")); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
// Logger scoped to this class; used to record exceptions before they are thrown.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; every synchronous operation on this class delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client that all synchronous calls delegate to
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = this.client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = this.client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    return this.client.getBlobUrl();
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return this.client.getAccountName();
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return this.client.getContainerName();
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    return this.client.getContainerClientBuilder().buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return this.client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return this.client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return this.client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    return this.client.getEncryptionScope();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return this.client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return this.client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    return this.client.getVersionId();
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return this.client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // Full blob, no access conditions.
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Builds the exception thrown when a caller supplies a value that conflicts with a concurrency control of NONE.
 * <p>
 * The condition is invalid caller input rather than an unsupported operation, so
 * {@link IllegalArgumentException} is the accurate standard exception type here.
 *
 * @param wrongValue Name of the conflicting value the caller set.
 * @param toSet The concurrency control mode that would make the value legal.
 * @return the exception for the caller to throw.
 */
private IllegalArgumentException generateNoneException(String wrongValue, String toSet) {
    return new IllegalArgumentException(String.format("'%s' can not be set when 'concurrencyControl'"
        + " is set to NONE. Set 'concurrencyControl' to %s.", wrongValue, toSet));
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Boolean exists() {
    // No timeout, default context; only the boolean value is surfaced.
    return this.existsWithResponse(null, Context.NONE).getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block for at most the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File.
 * If the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one
 * second is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy parameters (metadata, tier, priority, conditions) default to null.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File.
 * If the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one
 * second is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the generic source conditions into blob-specific ones before building the options bag.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File.
 * If the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Start the async long-running operation and expose it through a synchronous poller.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copyId The id of the copy operation to abort.
 */
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout, default context; the response is intentionally discarded.
    this.abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Delegate to the async client and block for at most the optional timeout.
    Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public String copyFromUrl(String copySource) {
    // All optional parameters default to null; only the copy ID is surfaced.
    Response<String> response = copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options object and forward to the options-based overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block for at most the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    // Full blob, no retry options, no conditions, no range MD5; the response is intentionally discarded.
    this.downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        // reduce() drains the reactive byte-buffer body in order, writing each chunk to the caller's stream.
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    // Default behavior refuses to overwrite an existing file.
    return this.downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions;
    if (overwrite) {
        // Overwriting requires explicit open options: create if absent, truncate existing content,
        // and open for both read and write.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    } else {
        // null lets the downstream overload apply its default (create-new) semantics.
        openOptions = null;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
public void delete() {
deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
*/
public void setMetadata(Map<String, String> metadata) {
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob snapshot.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* @see BlobServiceClient
* user delegation key.
* @return A {@code String} representing all SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing all SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob">Azure Docs</a></p>
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Every optional copy parameter (metadata, tier, priority, source/destination
    // conditions) defaults to null; only the poll interval is forwarded.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options bag used by the options-based overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Start the async long-running copy, then adapt its poller to the synchronous API.
    SyncPoller<BlobCopyInfo, Void> poller = client.beginCopy(options).getSyncPoller();
    return poller;
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
    // No lease requirement and no timeout: rely on service defaults; errors surface as exceptions.
    final String noLeaseId = null;
    final Duration noTimeout = null;
    abortCopyFromUrlWithResponse(copyId, noLeaseId, noTimeout, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Kick off the async abort, then block with the optional timeout.
    Mono<Response<Void>> abortMono = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortMono, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
    // Delegate with all optional parameters defaulted, then unwrap the copy ID.
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Assemble the options bag from the flat parameter list before delegating.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Synchronous wrapper over the async copy-from-URL call.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
    // Full-blob download with defaults; no per-range MD5 verification.
    final boolean noRangeContentMd5 = false;
    downloadWithResponse(stream, null, null, null, noRangeContentMd5, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Fail fast on a null destination stream rather than inside the reactive pipeline.
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body flux into the caller's stream: each emitted ByteBuffer is
    // written in order, then the original response headers/status are returned.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap as unchecked so the failure propagates through the reactive chain;
                // logged before rethrow per this client's error-handling convention.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: fail if the destination file already exists.
    final boolean doNotOverwrite = false;
    return downloadToFile(filePath, doNotOverwrite);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // Without overwrite, pass null OpenOptions so the default create-new semantics apply
    // (a pre-existing file raises FileAlreadyExistsException).
    if (!overwrite) {
        return downloadToFileWithResponse(filePath, null, null, null, null, false, null, null, Context.NONE)
            .getValue();
    }
    // Overwrite: create the file if absent, truncate it if present.
    Set<OpenOption> openOptions = new HashSet<>();
    openOptions.add(StandardOpenOption.CREATE);
    openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    openOptions.add(StandardOpenOption.READ);
    openOptions.add(StandardOpenOption.WRITE);
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // No OpenOptions supplied: downstream applies create-new semantics (file must not exist).
    final Set<OpenOption> defaultOpenOptions = null;
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, defaultOpenOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Fill in defaults for any unset transfer options, then wrap in the storage-common type.
    final com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    // Build the options bag consumed by the options-based overload.
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Synchronous wrapper over the async file download.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a></p>
*/
public void delete() {
    // No snapshot handling, no conditions, no timeout: service defaults apply.
    final DeleteSnapshotsOptionType noSnapshotOption = null;
    deleteWithResponse(noSnapshotOption, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete, then block with the optional timeout.
    Mono<Response<Void>> deleteMono =
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
    return blockWithOptionalTimeout(deleteMono, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
    // Delegate with default conditions/timeout/context and unwrap the value.
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Synchronous wrapper over the async properties fetch.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
    // No access conditions and no timeout; response is discarded.
    final BlobRequestConditions noConditions = null;
    setHttpHeadersWithResponse(headers, noConditions, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Issue the async header update, then block with the optional timeout.
    Mono<Response<Void>> setHeadersMono = client.setHttpHeadersWithResponse(headers, requestConditions, context);
    return blockWithOptionalTimeout(setHeadersMono, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
public void setMetadata(Map<String, String> metadata) {
    // No access conditions and no timeout; response is discarded.
    final BlobRequestConditions noConditions = null;
    setMetadataWithResponse(metadata, noConditions, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Issue the async metadata replacement, then block with the optional timeout.
    Mono<Response<Void>> setMetadataMono = client.setMetadataWithResponse(metadata, requestConditions, context);
    return blockWithOptionalTimeout(setMetadataMono, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
    // Default options, no timeout; unwrap the tag map from the response.
    Response<Map<String, String>> response =
        this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Synchronous wrapper over the async tag fetch.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
    // Wrap the tag map in the options bag; response is discarded.
    BlobSetTagsOptions tagOptions = new BlobSetTagsOptions(tags);
    this.setTagsWithResponse(tagOptions, null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Issue the async tag replacement, then block with the optional timeout.
    Mono<Response<Void>> setTagsMono = client.setTagsWithResponse(options, context);
    return blockWithOptionalTimeout(setTagsMono, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
    // Delegate with all optional parameters defaulted and unwrap the snapshot client.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Take the snapshot asynchronously, then wrap the async snapshot client in a
    // synchronous BlobClientBase while preserving the original response envelope.
    Mono<Response<BlobClientBase>> snapshotMono = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotMono, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
    // No rehydrate priority, lease, or timeout; response is discarded.
    final RehydratePriority defaultPriority = null;
    setAccessTierWithResponse(tier, defaultPriority, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey, String accountName, Context context) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
} |
How about other requestConditions? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.ETAG
: options.getConcurrencyControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
case NONE:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateControlException("requestConditions.ifMatch",
ConcurrencyControl.NONE.toString(), ConcurrencyControl.ETAG.toString()));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateControlException("client.versionId",
ConcurrencyControl.NONE.toString(), ConcurrencyControl.VERSION_ID.toString()));
}
break;
case ETAG:
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateControlException("client.versionId",
ConcurrencyControl.ETAG.toString(), ConcurrencyControl.VERSION_ID.toString()));
}
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateControlException("requestConditions.ifMatch",
ConcurrencyControl.VERSION_ID.toString(), ConcurrencyControl.ETAG.toString()));
}
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
private IllegalStateException generateControlException(String wrongVariable, String originalControl,
String expectedControl) {
return new IllegalStateException(String.format("'%s' can not be set when 'concurrencyControl'"
+ " is set to '%s'. Set 'concurrencyControl' to '%s'.", wrongVariable, originalControl, expectedControl));
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
public void delete() {
deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
*/
public void setMetadata(Map<String, String> metadata) {
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob snapshot.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* @see BlobServiceClient
* user delegation key.
* @return A {@code String} representing all SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing all SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
} | class BlobClientBase {
    // Logger scoped to this class; used to record and rethrow client-side failures.
    private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
    // Underlying async client; every synchronous operation delegates to it and blocks.
    private final BlobAsyncClientBase client;
    /**
     * Constructor used by {@link SpecializedBlobClientBuilder}.
     *
     * <p>This synchronous client is a thin wrapper: all operations delegate to the supplied async
     * client and block on the result.</p>
     *
     * @param client the async blob client to wrap
     */
    protected BlobClientBase(BlobAsyncClientBase client) {
        this.client = client;
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot.
 */
public boolean isSnapshot() {
    final boolean snapshot = client.isSnapshot();
    return snapshot;
}
/**
 * Opens a blob input stream to download the entire blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // Default options: whole blob, no access conditions.
    return openInputStream(new BlobInputStreamOptions());
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    final BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Boolean exists() {
    final Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async call, bounding the wait only when a timeout is supplied.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Service defaults are used for metadata, tier, rehydrate priority and access conditions.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Bundle the individual parameters into the options bag accepted by the canonical overload.
    final BlobBeginCopyOptions options = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(options);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the polling logic; expose its synchronous poller.
    return client.beginCopy(options)
        .getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copyId The id of the copy operation to abort.
 */
public void abortCopyFromUrl(String copyId) {
    // The response is discarded; failures surface as exceptions from the blocking call.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    final Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public String copyFromUrl(String copySource) {
    final Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options bag accepted by the canonical overload.
    final BlobCopyFromUrlOptions options = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(options, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    // Full-blob download using default range, retry and access-condition settings; no range MD5.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Fold the async response body (a stream of buffers) into the caller's OutputStream,
    // writing each buffer as it arrives, then surface the original response metadata.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap as unchecked so the failure propagates through the reactive chain.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    // By default an existing file is never clobbered.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    final Set<OpenOption> openOptions;
    if (overwrite) {
        // Replace any existing file content instead of failing with FileAlreadyExistsException.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    } else {
        // null selects the default open options, which require the file not to exist.
        openOptions = null;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forward with null open options, i.e. the default create-new semantics.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common representation first.
    final com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    final BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the Azure Docs.</p>
 */
public void delete() {
    // Defaults: no snapshot option, no access conditions, no timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    final Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Defaults: no access conditions, no timeout.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
public void setMetadata(Map<String, String> metadata) {
    // Defaults: no access conditions, no timeout.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    final Response<Map<String, String>> response =
        this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    // The response is discarded; failures surface as exceptions from the blocking call.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier of the snapshot.
 */
public BlobClientBase createSnapshot() {
    final Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier of the snapshot.
 */
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous client before handing it back.
    final Mono<Response<BlobClientBase>> response =
        client.createSnapshotWithResponse(metadata, requestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param tier The new tier for the blob.
 */
public void setAccessTier(AccessTier tier) {
    // Defaults: no rehydrate priority, no lease, no timeout.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Funnel the individual parameters into the options-based overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    Mono<Response<Void>> asyncResponse = this.client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(asyncResponse, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 */
public void undelete() {
    // No timeout; response headers are discarded.
    undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(this.client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @return The sku name and account kind.
 */
public StorageAccountInfo getAccountInfo() {
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(this.client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Signing is performed entirely by the underlying async client; no service call is made.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    String sas = client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey,
        accountName, context);
    return sas;
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Signing happens locally with the shared key credential; no service round trip.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    String sas = client.generateSas(blobServiceSasSignatureValues, context);
    return sas;
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
public InputStream openQueryInputStream(String expression) {
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block on the async query; a null response would indicate an unexpected client state.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the streamed body into a blocking InputStream for the sync caller.
    InputStream bodyStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), bodyStream, asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public void query(OutputStream stream, String expression) {
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> {
            // Drain every buffer of the query result into the caller-supplied output stream.
            Mono<OutputStream> written = response.getValue()
                .reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                    try {
                        outputStream.write(FluxUtil.byteBufferToArray(buffer));
                        return outputStream;
                    } catch (IOException ex) {
                        // Surface I/O failures through the reactive pipeline as unchecked exceptions.
                        throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                    }
                });
            return written.thenReturn(new BlobQueryResponse(response));
        });
    return blockWithOptionalTimeout(download, timeout);
}
} |
I think only etag is checked because we only lock on the etag. The other request conditions are only used in the initial get properties and then will be replaced by the etag (if that's the control type specified). Although I can see how it might be odd to specify LMT and version at the same time.... | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.ETAG
: options.getConcurrencyControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
case NONE:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateControlException("requestConditions.ifMatch",
ConcurrencyControl.NONE.toString(), ConcurrencyControl.ETAG.toString()));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateControlException("client.versionId",
ConcurrencyControl.NONE.toString(), ConcurrencyControl.VERSION_ID.toString()));
}
break;
case ETAG:
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateControlException("client.versionId",
ConcurrencyControl.ETAG.toString(), ConcurrencyControl.VERSION_ID.toString()));
}
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateControlException("requestConditions.ifMatch",
ConcurrencyControl.VERSION_ID.toString(), ConcurrencyControl.ETAG.toString()));
}
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Body of openInputStream(BlobInputStreamOptions) — the signature sits on the preceding line.
options = options == null ? new BlobInputStreamOptions() : options;
// ETAG is the default read-consistency mode when the caller specifies none.
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
// Initial properties fetch; its eTag/versionId seed the consistency handling below.
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
// Defaults: whole blob, 4 MB chunks, empty request conditions.
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
// Lock reads to the observed eTag unless the caller already supplied one.
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
// Pin subsequent reads to the version observed above, unless already version-scoped.
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
// Logger scoped to this class for consistent diagnostics.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every sync operation delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client that this sync client wraps and delegates to
 */
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = this.client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = this.client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the blob URL.
 */
public String getBlobUrl() {
    return this.client.getBlobUrl();
}
/**
 * Get associated account name.
 *
 * @return the name of the storage account this resource belongs to.
 */
public String getAccountName() {
    return this.client.getAccountName();
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container holding this blob.
 */
public final String getContainerName() {
    return this.client.getContainerName();
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Build a sync container client from the async client's pre-configured builder.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return this.client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The HTTP pipeline used for service requests.
 */
public HttpPipeline getHttpPipeline() {
    return this.client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return this.client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    return this.client.getEncryptionScope();
}
/**
 * Gets the service version the client is using.
 *
 * @return the {@link BlobServiceVersion} this client targets.
 */
public BlobServiceVersion getServiceVersion() {
    return this.client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob.
 */
public String getSnapshotId() {
    return this.client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the blob.
 */
public String getVersionId() {
    return this.client.getVersionId();
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if this client addresses a blob snapshot.
 */
public boolean isSnapshot() {
    return this.client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // Whole blob, no access conditions.
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
 * Builds the {@link IllegalStateException} thrown when a caller-supplied setting conflicts with the
 * configured concurrency control mode.
 *
 * @param wrongVariable Name of the setting that conflicts with the current control mode.
 * @param originalControl The control mode that was configured.
 * @param expectedControl The control mode under which {@code wrongVariable} would be valid.
 * @return An {@link IllegalStateException} describing the conflict; callers are expected to throw it.
 */
private IllegalStateException generateControlException(String wrongVariable, String originalControl,
String expectedControl) {
return new IllegalStateException(String.format("'%s' can not be set when 'concurrencyControl'"
+ " is set to '%s'. Set 'concurrencyControl' to '%s'.", wrongVariable, originalControl, expectedControl));
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(this.client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy parameters defaulted to null.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Bundle the individual parameters into the options overload.
    BlobBeginCopyOptions options = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(options);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Expose the async poller as a synchronous SyncPoller.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 */
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout; response headers are discarded.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> asyncResponse = this.client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(asyncResponse, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public String copyFromUrl(String copySource) {
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options overload.
    BlobCopyFromUrlOptions options = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(options, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(this.client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    // Full-blob download: no range, no retry options, no conditions, no range MD5, no timeout.
    this.downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body flux into the caller's stream, then surface the original response metadata.
    Mono<BlobDownloadResponse> operation = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(asyncResponse -> asyncResponse.getValue()
            .reduce(stream, (target, chunk) -> {
                try {
                    target.write(FluxUtil.byteBufferToArray(chunk));
                    return target;
                } catch (IOException ex) {
                    // Wrap the checked IOException so it can propagate through the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            })
            .thenReturn(new BlobDownloadResponse(asyncResponse)));
    return blockWithOptionalTimeout(operation, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist; if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    // Default is overwrite == false: fail if the destination file already exists.
    return this.downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // Overwriting requires explicit open options; null keeps the default create-new behavior.
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        openOptions = overwriteOptions;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist; if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate to the overload with open options, passing null to keep the default create-new behavior.
    return this.downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options to the common representation expected by the options bag.
    com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return this.downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 */
public void delete() {
    // No snapshot options, no request conditions, no timeout.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    // No request conditions, no timeout.
    return this.getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    // No request conditions, no timeout.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob.
 */
public void setMetadata(Map<String, String> metadata) {
    // No request conditions, no timeout.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    // Default options, no timeout.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
 * Returns the blob's tags.
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    // No timeout; response is discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @return A {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public BlobClientBase createSnapshot() {
    // No snapshot metadata, no request conditions, no timeout.
    return this.createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @param metadata Metadata to associate with the blob snapshot.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a sync client before blocking.
    Mono<Response<BlobClientBase>> operation = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(operation, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 */
public void setAccessTier(AccessTier tier) {
    // No rehydrate priority, no lease, no timeout.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual parameters into an options bag and delegate to the options-based overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return this.setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    Mono<Response<Void>> operation = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(operation, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 */
public void undelete() {
    // No timeout; response is discarded.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @return The sku name and account kind.
 */
public StorageAccountInfo getAccountInfo() {
    // No timeout.
    return this.getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @return A {@code String} representing all SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * Note: the client must be authenticated via {@link StorageSharedKeyCredential}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing all SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
public InputStream openQueryInputStream(String expression) {
    return this.openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block for the query response headers; the body is surfaced lazily through a FluxInputStream.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), new FluxInputStream(asyncResponse.getValue()),
        asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public void query(OutputStream stream, String expression) {
    // No timeout; response is discarded.
    this.queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the async query result flux into the caller's stream, then surface the response metadata.
    Mono<BlobQueryResponse> operation = client
        .queryWithResponse(queryOptions, context)
        .flatMap(asyncResponse -> asyncResponse.getValue()
            .reduce(queryOptions.getOutputStream(), (target, chunk) -> {
                try {
                    target.write(FluxUtil.byteBufferToArray(chunk));
                    return target;
                } catch (IOException ex) {
                    // Wrap the checked IOException so it can propagate through the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            })
            .thenReturn(new BlobQueryResponse(asyncResponse)));
    return blockWithOptionalTimeout(operation, timeout);
}
} | class BlobClientBase {
// Per-instance logger used to record and rethrow errors surfaced by blocking calls.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every sync operation on this class delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client that all synchronous operations delegate to
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    return new BlobClientBase(this.client.getSnapshotClient(snapshot));
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    return new BlobClientBase(this.client.getVersionClient(versionId));
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    return this.client.getBlobUrl();
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return this.client.getAccountName();
}
/**
 * Get the container name.
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return this.client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions options = new BlobInputStreamOptions();
    options.setRange(range);
    options.setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
// NOTE(review): orphaned Javadoc — the openInputStream(BlobInputStreamOptions) implementation it documents is
// missing at this point in the file, yet the overloads above delegate to it. Restore the method here.
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Delegate to the full overload; all optional arguments are left unset.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options bag consumed by the canonical overload. The source
    // conditions are adapted from the generic RequestConditions type into blob-specific ones.
    BlobBeginCopyOptions options = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(options);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Start the async poller and expose its blocking/synchronous view to the caller.
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copyId The id of the copy operation to abort.
 */
public void abortCopyFromUrl(String copyId) {
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public String copyFromUrl(String copySource) {
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Pack the flat parameter list into the options bag consumed by the canonical overload.
    BlobCopyFromUrlOptions options = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(options, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    // Full blob (null range), no retry options/conditions, no MD5, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        // Fold the async body into the caller's stream, writing each buffer as it arrives. An IOException
        // is wrapped unchecked and propagated through the reactive pipeline via the class logger.
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    // overwrite = false: fail if the destination file already exists.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // CREATE + TRUNCATE_EXISTING replaces any existing file instead of failing on it.
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        openOptions = overwriteOptions;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate to the overload that also takes OpenOptions, leaving them unset (default create-new semantics).
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common type expected by the options bag.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the Azure Docs.</p>
 */
public void delete() {
    // No snapshot option, no conditions, no timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
public void setMetadata(Map<String, String> metadata) {
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    Response<Map<String, String>> response =
        this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
    /**
     * Creates a read-only snapshot of the blob.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
     *
     * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
     * {@link BlobClientBase#getSnapshotId()} to get the identifier of the snapshot.
     */
    public BlobClientBase createSnapshot() {
        return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
    }
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
     * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
     * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
     *
     * @param tier The new tier for the blob.
     */
    public void setAccessTier(AccessTier tier) {
        setAccessTierWithResponse(tier, null, null, null, Context.NONE);
    }
    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
     * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
     * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
     *
     * @param tier The new tier for the blob.
     * @param priority Optional priority to set for re-hydrating blobs.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
        Duration timeout, Context context) {
        return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
            timeout, context);
    }
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
     */
    public void undelete() {
        undeleteWithResponse(null, Context.NONE);
    }
    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
     *
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
        Mono<Response<Void>> response = client.undeleteWithResponse(context);
        return blockWithOptionalTimeout(response, timeout);
    }
    /**
     * Returns the sku name and account kind for the account.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a></p>
     *
     * @return The sku name and account kind.
     */
    public StorageAccountInfo getAccountInfo() {
        return getAccountInfoWithResponse(null, Context.NONE).getValue();
    }
    /**
     * Returns the sku name and account kind for the account.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a></p>
     *
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The sku name and account kind.
     */
    public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
        Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
        return blockWithOptionalTimeout(response, timeout);
    }
    /**
     * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
     * See {@link BlobServiceClient#getUserDelegationKey(OffsetDateTime, OffsetDateTime)} for more information on
     * how to get a user delegation key.
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey) {
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
    }
    /**
     * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
     * See {@link BlobServiceClient#getUserDelegationKey(OffsetDateTime, OffsetDateTime)} for more information on
     * how to get a user delegation key.
     * @param accountName The account name.
     * @param context Additional context that is passed through the code when generating a SAS.
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey, String accountName, Context context) {
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
            context);
    }
    /**
     * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     *
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
        return this.client.generateSas(blobServiceSasSignatureValues);
    }
    /**
     * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param context Additional context that is passed through the code when generating a SAS.
     *
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
        return this.client.generateSas(blobServiceSasSignatureValues, context);
    }
    /**
     * Opens a blob input stream to query the blob.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
     *
     * @param expression The query expression.
     * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
     */
    public InputStream openQueryInputStream(String expression) {
        return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param expression The query expression.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    public void query(OutputStream stream, String expression) {
        queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
    }
    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
     *
     * @param queryOptions {@link BlobQueryOptions The query options}.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        // Reduce over the response body: write every ByteBuffer chunk into the caller's OutputStream
        // as it arrives, then surface the original response metadata once the body is drained.
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Propagate through the reactive pipeline as an unchecked exception.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
} |
Maybe they just want that extra level of assurance :P | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.ETAG
: options.getConcurrencyControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
case NONE:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateControlException("requestConditions.ifMatch",
ConcurrencyControl.NONE.toString(), ConcurrencyControl.ETAG.toString()));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateControlException("client.versionId",
ConcurrencyControl.NONE.toString(), ConcurrencyControl.VERSION_ID.toString()));
}
break;
case ETAG:
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateControlException("client.versionId",
ConcurrencyControl.ETAG.toString(), ConcurrencyControl.VERSION_ID.toString()));
}
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateControlException("requestConditions.ifMatch",
ConcurrencyControl.VERSION_ID.toString(), ConcurrencyControl.ETAG.toString()));
}
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
    // Logger used when mapping and rethrowing exceptions from this client.
    private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
    // Backing async client; every synchronous operation delegates to it and blocks.
    private final BlobAsyncClientBase client;
    /**
     * Constructor used by {@link SpecializedBlobClientBuilder}.
     *
     * @param client the async blob client to which all operations are delegated
     */
    protected BlobClientBase(BlobAsyncClientBase client) {
        this.client = client;
    }
    /**
     * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
     *
     * @param snapshot the identifier for a specific snapshot of this blob
     * @return a {@link BlobClientBase} used to interact with the specific snapshot.
     */
    public BlobClientBase getSnapshotClient(String snapshot) {
        return new BlobClientBase(client.getSnapshotClient(snapshot));
    }
    /**
     * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
     *
     * @param versionId the identifier for a specific version of this blob,
     * pass {@code null} to interact with the latest blob version.
     * @return a {@link BlobClientBase} used to interact with the specific version.
     */
    public BlobClientBase getVersionClient(String versionId) {
        return new BlobClientBase(client.getVersionClient(versionId));
    }
    /**
     * Gets the URL of the blob represented by this client.
     *
     * @return the URL.
     */
    public String getBlobUrl() {
        return client.getBlobUrl();
    }
    /**
     * Gets the associated account name.
     *
     * @return account name associated with this storage resource.
     */
    public String getAccountName() {
        return client.getAccountName();
    }
    /**
     * Gets the container name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
     *
     * @return The name of the container.
     */
    public final String getContainerName() {
        return client.getContainerName();
    }
    /**
     * Gets a client pointing to the parent container.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
     *
     * @return {@link BlobContainerClient}
     */
    public BlobContainerClient getContainerClient() {
        return client.getContainerClientBuilder().buildClient();
    }
    /**
     * Decodes and gets the blob name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
     *
     * @return The decoded name of the blob.
     */
    public final String getBlobName() {
        return client.getBlobName();
    }
    /**
     * Gets the {@link HttpPipeline} powering this client.
     *
     * @return The pipeline.
     */
    public HttpPipeline getHttpPipeline() {
        return client.getHttpPipeline();
    }
    /**
     * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
     *
     * @return the customer provided key used for encryption.
     */
    public CpkInfo getCustomerProvidedKey() {
        return client.getCustomerProvidedKey();
    }
    /**
     * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
     * Package-private: intended for use by sibling specialized clients only.
     *
     * @return the encryption scope used for encryption.
     */
    String getEncryptionScope() {
        return client.getEncryptionScope();
    }
    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public BlobServiceVersion getServiceVersion() {
        return client.getServiceVersion();
    }
    /**
     * Gets the snapshotId for a blob resource.
     *
     * @return A string that represents the snapshotId of the snapshot blob.
     */
    public String getSnapshotId() {
        return client.getSnapshotId();
    }
    /**
     * Gets the versionId for a blob resource.
     *
     * @return A string that represents the versionId of the blob.
     */
    public String getVersionId() {
        return client.getVersionId();
    }
    /**
     * Determines if a blob is a snapshot.
     *
     * @return A boolean that indicates if a blob is a snapshot.
     */
    public boolean isSnapshot() {
        return client.isSnapshot();
    }
    /**
     * Opens a blob input stream to download the blob.
     *
     * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    public final BlobInputStream openInputStream() {
        return openInputStream(null, null);
    }
    /**
     * Opens a blob input stream to download the specified range of the blob.
     *
     * @param range {@link BlobRange}
     * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
     * blob.
     * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
        return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
    }
    /**
     * Builds the exception thrown when an {@code openInputStream} option conflicts with the requested concurrency
     * control mode. (The previous Javadoc here documented {@code openInputStream(BlobInputStreamOptions)} and did
     * not describe this helper.)
     *
     * @param wrongVariable Name of the option that may not be set under {@code originalControl}.
     * @param originalControl The concurrency control mode that was requested.
     * @param expectedControl The concurrency control mode under which {@code wrongVariable} would be valid.
     * @return An {@link IllegalStateException} describing the conflict; callers are expected to throw it.
     */
    private IllegalStateException generateControlException(String wrongVariable, String originalControl,
        String expectedControl) {
        return new IllegalStateException(String.format("'%s' can not be set when 'concurrencyControl'"
            + " is set to '%s'. Set 'concurrencyControl' to '%s'.", wrongVariable, originalControl, expectedControl));
    }
    /**
     * Gets if the blob this client represents exists in the cloud.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
     *
     * @return true if the blob exists, false if it doesn't
     */
    public Boolean exists() {
        return existsWithResponse(null, Context.NONE).getValue();
    }
    /**
     * Gets if the blob this client represents exists in the cloud.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
     *
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return true if the blob exists, false if it doesn't
     */
    public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
        Mono<Response<Boolean>> response = client.existsWithResponse(context);
        return blockWithOptionalTimeout(response, timeout);
    }
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
    /**
     * Copies the data at the source URL to a blob.
     * <p>
     * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
     * the source is in another account, the source must either be public or authenticated with a SAS token. If the
     * source is in the same account, the Shared Key authorization on the destination will also be applied to the
     * source. The source URL must be URL encoded.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob">Azure Docs</a></p>
     *
     * @param options {@link BlobBeginCopyOptions}
     * @return A {@link SyncPoller} to poll the progress of blob copy operation.
     */
    public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
        return client.beginCopy(options).getSyncPoller();
    }
    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
     *
     * @param copyId The id of the copy operation to abort.
     */
    public void abortCopyFromUrl(String copyId) {
        abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
    }
    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
     *
     * @param copyId The id of the copy operation to abort.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
        Context context) {
        return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
    }
    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     * <p>
     * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
     * attached. The URL must be URL encoded.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
     *
     * @param copySource The source URL to copy from.
     * @return The copy ID for the long running operation.
     * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
     */
    public String copyFromUrl(String copySource) {
        return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
    }
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
    public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
        DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
        Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("stream", stream);
        // Reduce the async response body into the caller-supplied OutputStream, writing each
        // emitted buffer in order, then surface the HTTP response once the body is fully consumed.
        Mono<BlobDownloadResponse> download = client
            .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
            .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Wrap as UncheckedIOException so the error can cross the reactive boundary.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobDownloadResponse(response)));
        // Block until the write completes (or the optional timeout elapses).
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
public void delete() {
deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
*/
public void setMetadata(Map<String, String> metadata) {
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob snapshot.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* @see BlobServiceClient
* user delegation key.
* @return A {@code String} representing all SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing all SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        // Reduce the async query result into the caller-supplied OutputStream, writing each
        // emitted buffer in order, then surface the HTTP response once the body is fully consumed.
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Wrap as UncheckedIOException so the error can cross the reactive boundary.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(response)));
        // Block until the write completes (or the optional timeout elapses).
        return blockWithOptionalTimeout(download, timeout);
    }
} | class BlobClientBase {
// Logger scoped to this class; used to record exceptions before they propagate to callers.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Every synchronous operation delegates to this async client and blocks for its result.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    return new BlobClientBase(client.getVersionClient(versionId));
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    return client.getBlobUrl();
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return client.getAccountName();
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return client.getContainerName();
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    return client.getContainerClientBuilder().buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
// Package-private on purpose: exposed to sibling classes in this package, not part of the public API.
String getEncryptionScope() {
    return client.getEncryptionScope();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    return client.getVersionId();
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob.
 * <p>
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // Null range/conditions mean "whole blob, no access conditions".
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 * <p>
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Bundle the legacy flat parameters into the options bag accepted by the canonical overload.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
// NOTE(review): the openInputStream(BlobInputStreamOptions) implementation this Javadoc describes is
// missing here, yet openInputStream(BlobRange, BlobRequestConditions) delegates to it — confirm the
// method body was not accidentally dropped from this file.
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Boolean exists() {
    return existsWithResponse(null, Context.NONE).getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    Mono<Response<Boolean>> response = client.existsWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional settings (metadata, tier, priority, conditions) default to null;
    // only the poll interval is forwarded to the full overload.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options bag used by the canonical overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the polling machinery; expose its synchronous poller.
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copyId The id of the copy operation to abort.
 */
public void abortCopyFromUrl(String copyId) {
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public String copyFromUrl(String copySource) {
    return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the flat parameter list into the options bag used by the canonical overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body flux into the caller's OutputStream, then surface the response metadata.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap as unchecked so the IOException can propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    // overwrite = false preserves the create-new-file-only semantics documented above.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> overwriteOptions = null;
    if (overwrite) {
        // Replace the default create-new-only semantics with create-or-truncate so an
        // existing file is reused instead of triggering FileAlreadyExistsException.
        overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, overwriteOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Null openOptions keeps the default create-new-file-only behavior.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Convert the blob-specific transfer options into the common type expected by the options bag,
    // applying defaults for unset fields.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async download, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the
 * <a href="https:
 */
public void delete() {
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    Mono<Response<Void>> response = client
        .deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    Mono<Response<Void>> response = client
        .setHttpHeadersWithResponse(headers, requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
public void setMetadata(Map<String, String> metadata) {
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Block on the async tag fetch; timeout of null means wait indefinitely.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
    // Wrap the tags in the options bag and delegate; the response is intentionally discarded.
    BlobSetTagsOptions options = new BlobSetTagsOptions(tags);
    setTagsWithResponse(options, null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
    // No metadata or request conditions; unwrap the snapshot-scoped client from the response.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Map the async response so the payload is a sync client bound to the new snapshot.
    Mono<Response<BlobClientBase>> response =
        client.createSnapshotWithResponse(metadata, requestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
    // Only the tier is set; priority, lease, timeout and context all default. Response discarded.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Fold the individual arguments into the options bag and reuse the canonical overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Note: the async client names this operation "setTier".
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
    // No timeout, empty context; the response is intentionally discarded.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
    // No timeout; unwrap the account-info payload from the response.
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Block on the async account-info call, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Delegation-key signing is implemented once on the wrapped async client.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Forward everything to the async client's four-argument overload.
    return client.generateUserDelegationSas(
        blobServiceSasSignatureValues, userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS signing is delegated to the wrapped async client.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Same as the single-argument overload, but threads the caller's context through signing.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Build a minimal options bag from the expression and unwrap the stream payload.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Start the query eagerly; the returned InputStream then pulls the body on demand.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    InputStream body = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), body, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Bind the caller's sink into the options bag; response headers are discarded.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Validate up front so a missing options bag or sink fails fast.
    // Fix: the assertion label previously said "options", which does not match the actual
    // parameter name 'queryOptions' and produced a misleading validation message.
    StorageImplUtils.assertNotNull("queryOptions", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                // Copy each emitted buffer into the caller-supplied stream.
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface I/O failures as unchecked so they propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
} |
If someone is using both versionId and ifMatch conditions, this is going to break them. Maybe we shouldn't default to "ETAG" a few lines above, but rather apply this switch block only if consistency control was explicitly set. Otherwise use the old logic? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null ? ConsistentReadControl.ETAG
: options.getConsistentReadControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateConsistentReadControlException("requestConditions.ifMatch",
ConsistentReadControl.NONE.toString(), ConsistentReadControl.ETAG.toString()));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateConsistentReadControlException("client.versionId",
ConsistentReadControl.NONE.toString(), ConsistentReadControl.VERSION_ID.toString()));
}
break;
case ETAG:
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateConsistentReadControlException("client.versionId",
ConsistentReadControl.ETAG.toString(), ConsistentReadControl.VERSION_ID.toString()));
}
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateConsistentReadControlException("requestConditions.ifMatch",
ConsistentReadControl.VERSION_ID.toString(), ConsistentReadControl.ETAG.toString()));
}
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
// Logger scoped to this class; exceptions are routed through logger.logExceptionAsError.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; every synchronous operation delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // This sync client is a thin blocking facade over the supplied async client.
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Derive a snapshot-scoped async client, then wrap it in a new sync facade.
    BlobAsyncClientBase snapshotClient = this.client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Derive a version-scoped async client, then wrap it in a new sync facade.
    BlobAsyncClientBase versionClient = this.client.getVersionClient(versionId);
    return new BlobClientBase(versionClient);
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // Pure delegation: the async client owns URL construction.
    return this.client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Pure delegation to the wrapped async client.
    return this.client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Pure delegation to the wrapped async client.
    return this.client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Reuse the async client's pre-configured container builder so settings carry over.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Pure delegation; decoding is handled by the async client.
    return this.client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Pure delegation to the wrapped async client.
    return this.client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Pure delegation to the wrapped async client.
    return this.client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private accessor; pure delegation to the wrapped async client.
    return this.client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Pure delegation to the wrapped async client.
    return this.client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Pure delegation to the wrapped async client.
    return this.client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Pure delegation to the wrapped async client.
    return this.client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // Pure delegation to the wrapped async client.
    return this.client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // Full-blob read with no range and no access conditions.
    return this.openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Fold the legacy argument pair into the options bag accepted by the canonical overload.
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Builds the exception thrown when a caller-supplied setting conflicts with the requested
 * consistent-read-control mode.
 *
 * @param wrongVariable Name of the conflicting setting.
 * @param originalControl The mode that was requested.
 * @param expectedControl The mode that would permit the conflicting setting.
 * @return An {@link IllegalStateException} describing the conflict.
 */
private IllegalStateException generateConsistentReadControlException(String wrongVariable, String originalControl,
    String expectedControl) {
    // Fix: the option is named 'consistentReadControl' (see BlobInputStreamOptions#getConsistentReadControl
    // and this method's own name); the previous message referred to a non-existent 'concurrencyControl'.
    return new IllegalStateException(String.format("'%s' can not be set when 'consistentReadControl'"
        + " is set to '%s'. Set 'consistentReadControl' to '%s'.", wrongVariable, originalControl,
        expectedControl));
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
    // No timeout; unwrap the boolean payload from the response.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy settings default to null; only the poll interval is forwarded.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options bag accepted by the canonical overload.
    BlobBeginCopyOptions options = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(options);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the long-running-operation machinery; expose its sync poller.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout, empty context; response discarded.
    this.abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Issue the abort through the async client, then block until completion or timeout.
    Mono<Response<Void>> abortResponse = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortResponse, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
    // Convenience overload: no metadata, tier, or request conditions; default context.
    return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options bag and defer to the options overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async copy, honoring the caller-supplied timeout when present.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
    // Convenience overload: full blob range, default retry options and conditions, no range MD5.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Fold the async body flux into the caller's stream: each emitted ByteBuffer is copied
    // to a byte[] and written sequentially, so chunk ordering is preserved.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface I/O failures as unchecked so they propagate out of the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: do not overwrite; fails if the file already exists.
    return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // Overwrite mode: create the file if absent and truncate any existing contents.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    }
    // A null openOptions keeps the default behavior: fail if the file already exists.
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegates with null OpenOptions, i.e. the default create-must-not-exist behavior.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common representation first.
    final com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    // Collect every parameter into the options bag and defer to the options overload.
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
public void delete() {
    // Convenience overload: default snapshot handling and request conditions, no timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async delete, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
public BlobProperties getProperties() {
    // Convenience overload: no request conditions, no timeout, empty context.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Block on the async properties fetch, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Convenience overload: no request conditions, no timeout, empty context.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async header update, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
*/
public void setMetadata(Map<String, String> metadata) {
    // Convenience overload: no request conditions, no timeout, empty context.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async metadata update, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
    // Convenience overload: default options, no timeout, empty context.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Block on the async tags fetch, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
    // Convenience overload: wrap the map in the options bag; no timeout, empty context.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Block on the async tags update, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
    // Convenience overload: no metadata or request conditions, no timeout, empty context.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob snapshot.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, then re-wrap the async result as a sync client.
    Mono<Response<BlobClientBase>> snapshotResponse = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotResponse, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
    // Convenience overload: no rehydrate priority or lease, no timeout, empty context.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options bag and defer to the options overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Kick off the async tier change, then block until completion or timeout.
    Mono<Response<Void>> tierResponse = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(tierResponse, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
    // Convenience overload: no timeout, empty context.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Block on the async undelete, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
    // Convenience overload: no timeout, empty context.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Block on the async account-info fetch, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* @see BlobServiceClient
* user delegation key.
* @return A {@code String} representing all SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Pure delegation: SAS generation is computed locally by the async client, no service call.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing all SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Pure delegation: SAS generation is computed locally by the async client, no service call.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Convenience overload: wrap the expression in default query options.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param expression The query expression.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    public void query(OutputStream stream, String expression) {
        // Convenience overload: no timeout, default context.
        queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
    }
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
} | class BlobClientBase {
    // Logger scoped to this class for surfacing client-side validation and I/O errors.
    private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
    // Underlying async client; every synchronous operation delegates to it and blocks on the result.
    private final BlobAsyncClientBase client;
    /**
     * Constructor used by {@link SpecializedBlobClientBuilder}.
     *
     * @param client the async blob client
     */
    protected BlobClientBase(BlobAsyncClientBase client) {
        // NOTE(review): no null check here; the builder is presumed to always pass a non-null client — confirm.
        this.client = client;
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
    /**
     * Gets the URL of the blob represented by this client.
     *
     * @return the URL.
     */
    public String getBlobUrl() {
        // Delegates directly to the wrapped async client.
        return client.getBlobUrl();
    }
    /**
     * Get associated account name.
     *
     * @return account name associated with this storage resource.
     */
    public String getAccountName() {
        // Delegates directly to the wrapped async client.
        return client.getAccountName();
    }
    /**
     * Get the container name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
     *
     * @return The name of the container.
     */
    public final String getContainerName() {
        // Delegates directly to the wrapped async client.
        return client.getContainerName();
    }
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
    /**
     * Decodes and gets the blob name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
     *
     * @return The decoded name of the blob.
     */
    public final String getBlobName() {
        // Delegates directly to the wrapped async client.
        return client.getBlobName();
    }
    /**
     * Gets the {@link HttpPipeline} powering this client.
     *
     * @return The pipeline.
     */
    public HttpPipeline getHttpPipeline() {
        // Delegates directly to the wrapped async client.
        return client.getHttpPipeline();
    }
    /**
     * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
     *
     * @return the customer provided key used for encryption.
     */
    public CpkInfo getCustomerProvidedKey() {
        // Delegates directly to the wrapped async client.
        return client.getCustomerProvidedKey();
    }
    /**
     * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
     *
     * @return the encryption scope used for encryption.
     */
    // Package-private: exposed to sibling classes in this package, not part of the public API.
    String getEncryptionScope() {
        return client.getEncryptionScope();
    }
    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public BlobServiceVersion getServiceVersion() {
        // Delegates directly to the wrapped async client.
        return client.getServiceVersion();
    }
    /**
     * Gets the snapshotId for a blob resource.
     *
     * @return A string that represents the snapshotId of the snapshot blob.
     */
    public String getSnapshotId() {
        // Delegates directly to the wrapped async client.
        return client.getSnapshotId();
    }
    /**
     * Gets the versionId for a blob resource.
     *
     * @return A string that represents the versionId of the blob.
     */
    public String getVersionId() {
        // Delegates directly to the wrapped async client.
        return client.getVersionId();
    }
    /**
     * Determines if a blob is a snapshot.
     *
     * @return A boolean that indicates if a blob is a snapshot.
     */
    public boolean isSnapshot() {
        // Delegates directly to the wrapped async client.
        return client.isSnapshot();
    }
    /**
     * Opens a blob input stream to download the blob.
     * <p>
     *
     * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    public final BlobInputStream openInputStream() {
        // Convenience overload: full blob, no request conditions.
        return openInputStream(null, null);
    }
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
    /**
     * Opens a blob input stream to download the specified range of the blob.
     *
     * @param options {@link BlobInputStreamOptions}
     * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    // NOTE(review): this Javadoc has no accompanying openInputStream(BlobInputStreamOptions) implementation;
    // restore the missing method or remove the orphaned documentation.
    /**
     * Gets if the blob this client represents exists in the cloud.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
     *
     * @return true if the blob exists, false if it doesn't
     */
    public Boolean exists() {
        // Convenience overload: no timeout, default context.
        return existsWithResponse(null, Context.NONE).getValue();
    }
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
    /**
     * Copies the data at the source URL to a blob.
     * <p>
     * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
     * the source is in another account, the source must either be public or authenticated with a SAS token. If the
     * source is in the same account, the Shared Key authorization on the destination will also be applied to the
     * source. The source URL must be URL encoded.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob">Azure Docs</a></p>
     *
     * @param options {@link BlobBeginCopyOptions}
     * @return A {@link SyncPoller} to poll the progress of blob copy operation.
     */
    public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
        // Expose the async poller's synchronous view.
        return client.beginCopy(options).getSyncPoller();
    }
    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
     *
     * @param copyId The id of the copy operation to abort.
     */
    public void abortCopyFromUrl(String copyId) {
        // Convenience overload: no lease, no timeout, default context.
        abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
    }
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     * <p>
     * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
     * attached. The URL must be URL encoded.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
     *
     * @param copySource The source URL to copy from.
     * @return The copy ID for the long running operation.
     * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
     */
    public String copyFromUrl(String copySource) {
        // Convenience overload: no metadata, tier, conditions, or timeout.
        return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
    }
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    public void download(OutputStream stream) {
        // Convenience overload: full blob, no retry options or conditions, MD5 not requested.
        downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
    }
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
     * will be thrown.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @return The blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs
     */
    public BlobProperties downloadToFile(String filePath) {
        // Convenience overload: never overwrite an existing file.
        return downloadToFile(filePath, false);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
     * will be thrown.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
     * transfers parameter is ignored.
     * @param downloadRetryOptions {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs.
     */
    public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
        ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
        BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
        // Delegates with null openOptions, preserving the default fail-if-exists behavior.
        return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
            requestConditions, rangeGetContentMd5, null, timeout, context);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a></p>
     */
    public void delete() {
        // Convenience overload: no snapshot option, conditions, or timeout.
        deleteWithResponse(null, null, null, Context.NONE);
    }
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Returns the blob's metadata and properties.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
     *
     * @return The blob properties and metadata.
     */
    public BlobProperties getProperties() {
        // Convenience overload: no conditions or timeout, default context.
        return getPropertiesWithResponse(null, null, Context.NONE).getValue();
    }
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
     * order to preserve existing values, they must be passed alongside the header being changed.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
     *
     * @param headers {@link BlobHttpHeaders}
     */
    public void setHttpHeaders(BlobHttpHeaders headers) {
        // Convenience overload: no conditions or timeout, default context.
        setHttpHeadersWithResponse(headers, null, null, Context.NONE);
    }
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the caller-supplied timeout if present.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
public void setMetadata(Map<String, String> metadata) {
// Convenience overload: no request conditions, no timeout, empty pipeline context.
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the caller-supplied timeout if present.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
    // Same as the WithResponse variant with default options, no timeout, and an empty context.
    Response<Map<String, String>> response =
        this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block, honoring the caller-supplied timeout if present.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
// Convenience overload: wraps the tags in an options bag; no timeout, empty context.
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the caller-supplied timeout if present.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
// All-default delegation; returns a client scoped to the newly created snapshot.
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Map the async response so the returned value is a synchronous client bound to the new snapshot.
    Mono<Response<BlobClientBase>> response = client.createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
// Convenience overload: no rehydrate priority, no lease, no timeout, empty context.
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Adapt the individual parameters onto the options-bag overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the caller-supplied timeout if present.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
public void undelete() {
// Convenience overload: no timeout, empty pipeline context.
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the caller-supplied timeout if present.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
// Convenience overload: no timeout, empty pipeline context; unwraps the response value.
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the caller-supplied timeout if present.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
// Pure delegation: SAS signing happens entirely in the async client (no network call).
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey, String accountName, Context context) {
// Pure delegation; accountName/context variants allow overriding the signing account and
// threading diagnostic context through SAS generation.
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
// Pure delegation: service SAS is signed locally with the shared key credential.
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
// Pure delegation; context threads diagnostic information through SAS generation.
return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Wrap the bare expression in an options bag and unwrap the response value.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
// Block on the async query; note this overload takes no timeout, so the block is unbounded.
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
// block() can return null on an empty Mono; surface that as a logged IllegalStateException.
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
// Re-wrap the async response, adapting the reactive body into a blocking InputStream.
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
// Convenience overload: bundles stream+expression into options; no timeout, empty context.
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
// Run the async query, then reduce the body Flux by writing every buffer into the
// caller's OutputStream. IOExceptions are wrapped as UncheckedIOException and
// propagated through the reactive pipeline.
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
// Block until the entire body has been drained (or the optional timeout elapses).
return blockWithOptionalTimeout(download, timeout);
}
} |
I think Rick and I already talked about this - there really is no reason to be using both version and ifMatch conditions (since they're basically duplicating work). | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Body of openInputStream(BlobInputStreamOptions): resolve option defaults, then validate the
// chosen ConsistentReadControl mode against the supplied request conditions before opening
// the stream. NOTE(review): the cross-checks between versionId and ifMatch below were later
// judged redundant (see adjacent review comment) — the simplified variant drops them.
options = options == null ? new BlobInputStreamOptions() : options;
// ETAG is the default consistent-read mode when the caller does not pick one.
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null ? ConsistentReadControl.ETAG
: options.getConsistentReadControl();
// Initial GET pins the blob's current eTag/versionId, used to lock subsequent reads.
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Default download chunk size is 4 MB.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
// NONE means no implicit consistency: reject caller-supplied ifMatch or a version-pinned client.
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateConsistentReadControlException("requestConditions.ifMatch",
ConsistentReadControl.NONE.toString(), ConsistentReadControl.ETAG.toString()));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateConsistentReadControlException("client.versionId",
ConsistentReadControl.NONE.toString(), ConsistentReadControl.VERSION_ID.toString()));
}
break;
case ETAG:
// ETAG mode pins reads to the eTag captured above unless the caller set one explicitly.
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateConsistentReadControlException("client.versionId",
ConsistentReadControl.ETAG.toString(), ConsistentReadControl.VERSION_ID.toString()));
}
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
// VERSION_ID mode pins reads to the version captured above; requires a versioning-enabled account.
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateConsistentReadControlException("requestConditions.ifMatch",
ConsistentReadControl.VERSION_ID.toString(), ConsistentReadControl.ETAG.toString()));
}
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Body of openInputStream(BlobInputStreamOptions), simplified per review: version/ifMatch
// cross-validation removed since the two mechanisms would only duplicate each other's work.
options = options == null ? new BlobInputStreamOptions() : options;
// ETAG is the default consistent-read mode when the caller does not pick one.
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
// Initial GET pins the blob's current eTag/versionId, used to lock subsequent reads.
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Default download chunk size is 4 MB.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
// No implicit consistency; reads use whatever conditions the caller supplied.
break;
case ETAG:
// Pin reads to the eTag captured above unless the caller already set ifMatch.
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
// Pin reads to the captured version; requires a versioning-enabled account.
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
// Store the async client; every sync operation on this class delegates to it.
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the async snapshot-scoped client in a new sync facade.
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the async version-scoped client in a new sync facade.
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
// Pure delegation to the wrapped async client.
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
// Pure delegation to the wrapped async client.
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
// Pure delegation to the wrapped async client.
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
// Builds a new sync container client from the async client's pre-configured builder.
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
// Pure delegation to the wrapped async client.
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
// Pure delegation to the wrapped async client.
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
// Pure delegation to the wrapped async client.
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
// Package-private: pure delegation to the wrapped async client.
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
// Pure delegation to the wrapped async client.
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
// Pure delegation to the wrapped async client.
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
// Pure delegation to the wrapped async client.
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
// Pure delegation to the wrapped async client.
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
// Convenience overload: full blob range, no request conditions.
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Adapt the legacy parameter pair onto the options-bag overload.
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Builds the error reported when a {@link ConsistentReadControl} mode conflicts with another
 * setting (e.g. a caller-supplied ifMatch or a version-pinned client).
 *
 * @param wrongVariable Name of the conflicting setting (e.g. {@code "requestConditions.ifMatch"}).
 * @param originalControl The mode the caller selected.
 * @param expectedControl The mode that would be compatible with {@code wrongVariable}.
 * @return An {@link IllegalStateException} describing the conflict; caller is expected to log and throw it.
 */
private IllegalStateException generateConsistentReadControlException(String wrongVariable, String originalControl,
    String expectedControl) {
    // Bug fix: the message previously referenced a nonexistent 'concurrencyControl' option;
    // the public option is 'consistentReadControl' (see BlobInputStreamOptions.getConsistentReadControl()).
    return new IllegalStateException(String.format("'%s' can not be set when 'consistentReadControl'"
        + " is set to '%s'. Set 'consistentReadControl' to '%s'.", wrongVariable, originalControl,
        expectedControl));
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
// Convenience overload: no timeout, empty pipeline context; unwraps the response value.
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the caller-supplied timeout if present.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Convenience overload: defaults for metadata, tier, priority, and both sets of conditions.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. ETag and LastModifiedTime
 * are used to construct conditions related to when the blob was changed relative to the given request. The request
 * will fail if the specified condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // The generic RequestConditions must be adapted to blob-specific source conditions before delegation.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the polling logic; expose its poller synchronously.
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * @param copyId The id of the copy operation to abort.
 */
public void abortCopyFromUrl(String copyId) {
    // No lease, timeout, or context constraints for the simple overload.
    this.abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Issue the async abort and block until completion (or the optional timeout elapses).
    Mono<Response<Void>> abortResponse = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortResponse, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public String copyFromUrl(String copySource) {
    // Delegate with all optional arguments unset and unwrap the response value.
    return this.copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. ETag and LastModifiedTime
 * are used to construct conditions related to when the blob was changed relative to the given request. The request
 * will fail if the specified condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the individual arguments into the options type consumed by the canonical overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Kick off the async copy and block for the result.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    // Full-blob download: no range, retry options, conditions, or MD5 verification.
    this.downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange} of the blob to download; null downloads the whole blob.
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Fail fast before issuing any network call.
    StorageImplUtils.assertNotNull("stream", stream);
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        // Drain the body Flux into the caller's stream; reduce gives sequential, ordered writes.
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface stream write failures as unchecked so they propagate through the reactive chain.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        // Once fully written, re-wrap the original response (headers/status) without the consumed body.
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    // Default behavior never overwrites an existing file.
    return this.downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions = overwrite ? new HashSet<>() : null;
    if (openOptions != null) {
        // Replace any existing file: create if absent, truncate, then open for read/write.
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    }
    // A null openOptions set keeps the default create-new-only behavior downstream.
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with no open options, preserving the create-new-only default.
    return this.downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options (defaults applied) into the common type.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return this.downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async file download and block until it completes (or times out).
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 */
public void delete() {
    // Unconditional delete: no snapshot option, conditions, or timeout.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete and block for completion.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    // Unconditional fetch; unwrap the value from the full response.
    return this.getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch asynchronously and block for the result.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Unconditional header update with no timeout.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Apply the headers asynchronously and block for completion.
    return blockWithOptionalTimeout(client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob.
 */
public void setMetadata(Map<String, String> metadata) {
    // Unconditional metadata replacement with no timeout.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Replace metadata asynchronously and block for completion.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    // Default options; unwrap the tag map from the full response.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
 * Returns the blob's tags.
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Fetch the tags asynchronously and block for the result.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    // Wrap the raw map in the options type and delegate; response is discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Replace the tags asynchronously and block for completion.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @return A {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public BlobClientBase createSnapshot() {
    // Snapshot with no metadata or conditions; unwrap the snapshot client.
    return this.createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @param metadata Metadata to associate with the blob snapshot.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous BlobClientBase before blocking.
    Mono<Response<BlobClientBase>> snapshotResponse = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotResponse, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 */
public void setAccessTier(AccessTier tier) {
    // No rehydrate priority, lease, or timeout for the simple overload.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual arguments into the options type consumed by the canonical overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return this.setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Apply the tier asynchronously and block for completion.
    Mono<Response<Void>> tierResponse = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(tierResponse, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 */
public void undelete() {
    // Response is discarded for the simple overload.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Restore asynchronously and block for completion.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @return The sku name and account kind.
 */
public StorageAccountInfo getAccountInfo() {
    // No timeout; unwrap the value from the full response.
    return this.getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Query asynchronously and block for the result.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @see BlobServiceClient for more information on how to get a
 * user delegation key.
 * @return A {@code String} representing all SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
 * Note : The client must be authenticated via {@link StorageSharedKeyCredential}
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas#BlobServiceSasSignatureValues}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 *
 * @return A {@code String} representing all SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream#String}
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
public InputStream openQueryInputStream(String expression) {
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream#BlobQueryOptions}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block on the async query call; a null result means the reactive pipeline completed empty,
    // which is not expected for this operation and is surfaced as an IllegalStateException.
    BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
    if (response == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Wrap the async body Flux in an InputStream so the caller can read the query result synchronously.
    return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
        new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query#OutputStream-String}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public void query(OutputStream stream, String expression) {
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse#BlobQueryOptions-Duration-Context}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Drain the async response body into the caller-supplied OutputStream. IOExceptions are wrapped
    // unchecked so they can propagate out of the reactive pipeline.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
} | class BlobClientBase {
// Logger scoped to this class for surfacing mapped exceptions.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; every synchronous operation delegates to it and blocks on the result.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Returns a new {@link BlobClientBase} that targets the given {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Returns a new {@link BlobClientBase} that targets the given {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Returns the URL identifying the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    final String blobUrl = client.getBlobUrl();
    return blobUrl;
}
/**
 * Returns the name of the storage account this resource belongs to.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    final String accountName = client.getAccountName();
    return accountName;
}
/**
 * Returns the name of the container holding this blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    final String containerName = client.getContainerName();
    return containerName;
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Builds a fresh sync container client from the async client's builder each call.
    return client.getContainerClientBuilder().buildClient();
}
/**
 * Returns this blob's name, decoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    final String blobName = client.getBlobName();
    return blobName;
}
/**
 * Returns the {@link HttpPipeline} that powers this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    final HttpPipeline pipeline = client.getHttpPipeline();
    return pipeline;
}
/**
 * Returns the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    final CpkInfo cpk = client.getCustomerProvidedKey();
    return cpk;
}
/**
 * Returns the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    final String encryptionScope = client.getEncryptionScope();
    return encryptionScope;
}
/**
 * Returns the service version used by this client.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    final BlobServiceVersion version = client.getServiceVersion();
    return version;
}
/**
 * Returns the snapshotId for this blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    final String snapshotId = client.getSnapshotId();
    return snapshotId;
}
/**
 * Returns the versionId for this blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    final String versionId = client.getVersionId();
    return versionId;
}
/**
 * Tells whether this client is pointed at a blob snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    final boolean snapshot = client.isSnapshot();
    return snapshot;
}
/**
 * Opens a blob input stream to download the blob, covering the whole blob with default request conditions.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
// NOTE(review): the openInputStream(BlobInputStreamOptions) implementation this Javadoc describes is
// missing here, although other methods call it — restore the method body or this file will not compile.
/**
 * Checks whether the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse#Duration-Context}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    Mono<Response<Boolean>> response = client.existsWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy#String-Duration}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Delegate with no metadata, tier, rehydrate priority, or access conditions.
    return beginCopy(sourceUrl,
        null,
        null,
        null,
        null,
        null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy#String-Map-AccessTier-RehydratePriority-RequestConditions-BlobRequestConditions-Duration}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Adapt the flat parameter list onto the options-bag overload; the generic RequestConditions are
    // converted to blob-specific source conditions by ModelHelper.
    return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
        .setRehydratePriority(priority).setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy#BlobBeginCopyOptions}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Wrap the async PollerFlux as a blocking SyncPoller.
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl#String}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 */
public void abortCopyFromUrl(String copyId) {
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse#String-String-Duration-Context}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl#String}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public String copyFromUrl(String copySource) {
    return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse#String-Map-AccessTier-RequestConditions-BlobRequestConditions-Duration-Context}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Adapt the flat parameter list onto the options-bag overload.
    return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
        .setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse#BlobCopyFromUrlOptions-Duration-Context}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download#OutputStream}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse#OutputStream-BlobRange-DownloadRetryOptions-BlobRequestConditions-boolean-Duration-Context}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Drain the async response body into the caller-supplied OutputStream. IOExceptions are wrapped
    // unchecked so they can propagate out of the reactive pipeline.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile#String}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile#String-boolean}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    if (!overwrite) {
        // Default behavior: null openOptions means "create, must not already exist".
        return downloadToFileWithResponse(filePath, null, null, null, null, false, null, null, Context.NONE)
            .getValue();
    }
    // Overwrite: truncate an existing file (or create it) and open for read/write.
    Set<OpenOption> overwriteOptions = new HashSet<>();
    overwriteOptions.add(StandardOpenOption.CREATE);
    overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    overwriteOptions.add(StandardOpenOption.READ);
    overwriteOptions.add(StandardOpenOption.WRITE);
    return downloadToFileWithResponse(filePath, null, null, null, null, false, overwriteOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse#String-BlobRange-ParallelTransferOptions-DownloadRetryOptions-BlobRequestConditions-boolean-Duration-Context}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with null openOptions, i.e. "create, must not already exist".
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse#String-BlobRange-ParallelTransferOptions-DownloadRetryOptions-BlobRequestConditions-boolean-Set-Duration-Context}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Fill in defaults and convert to the common ParallelTransferOptions before delegating.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse#BlobDownloadToFileOptions-Duration-Context}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 */
public void delete() {
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the delete through the async client, then block (optionally bounded by the timeout).
    Mono<Response<Void>> asyncResponse =
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
    return blockWithOptionalTimeout(asyncResponse, timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-properties">Azure Docs</a>.</p>
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    // Unconditional fetch: no access conditions, no timeout, empty context.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block until the properties arrive or the timeout elapses.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Apply the headers with no access conditions and no timeout.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client, then block (optionally bounded by the timeout).
    return blockWithOptionalTimeout(client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
public void setMetadata(Map<String, String> metadata) {
    // Replace metadata unconditionally: no access conditions, no timeout, empty context.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Issue the metadata update asynchronously, then block for the outcome.
    Mono<Response<Void>> asyncResponse = client.setMetadataWithResponse(metadata, requestConditions, context);
    return blockWithOptionalTimeout(asyncResponse, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
public Map<String, String> getTags() {
    // Default options, no timeout, empty pipeline context.
    Response<Map<String, String>> response = getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block until the tags arrive or the timeout elapses.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
public void setTags(Map<String, String> tags) {
    // Replace all existing tags; no timeout, empty pipeline context.
    setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client, then block (optionally bounded by the timeout).
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public BlobClientBase createSnapshot() {
    // Snapshot with no metadata, no access conditions, no timeout.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a sync BlobClientBase before blocking for the result.
    Mono<Response<BlobClientBase>> snapshot = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshot, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
public void setAccessTier(AccessTier tier) {
    // No rehydrate priority, no lease, no timeout; empty pipeline context.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Adapt the flat argument list onto the options-based overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client, then block (optionally bounded by the timeout).
    Mono<Response<Void>> setTier = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(setTier, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/undelete-blob">Azure Docs</a>.</p>
 */
public void undelete() {
    // No timeout; empty pipeline context.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client, then block (optionally bounded by the timeout).
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
    // No timeout; empty pipeline context.
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client, then block (optionally bounded by the timeout).
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local (no service call); reuse the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local (no service call); reuse the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Signing happens locally with the shared key credential; no network round-trip.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Signing happens locally with the shared key credential; no network round-trip.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Build default query options around the expression and unwrap the response body.
    BlobQueryOptions options = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(options).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block on the async query; a null response indicates an unexpected client state.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the streamed query result as a blocking InputStream over the response Flux.
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(), asyncResponse.getHeaders(),
        new FluxInputStream(asyncResponse.getValue()), asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Route the query result into the caller-supplied stream; no timeout, empty context.
    BlobQueryOptions options = new BlobQueryOptions(expression, stream);
    queryWithResponse(options, null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
// Both the options object and its target output stream are mandatory.
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
// Reduce the async body (a Flux of ByteBuffers) into the caller's OutputStream,
// then surface the original response metadata once all chunks are written.
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Propagate write failures as UncheckedIOException through the reactive pipeline.
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
} |
Resolved | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Normalize inputs: fall back to default options and ETag-based concurrency control.
options = options == null ? new BlobInputStreamOptions() : options;
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.E_TAG
: options.getConcurrencyControl();
// Snapshot current blob state; the ETag/version below pin reads to this state.
BlobProperties properties = getProperties();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Default chunk size is 4 MB per downloaded block.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
case NONE:
// NONE forbids any implicit pinning: reject pre-set ifMatch or a versioned client.
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateNoneException("requestConditions.ifMatch", "E_TAG"));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateNoneException("client.versionId", "VERSION_ID"));
}
break;
case E_TAG:
// Pin subsequent range reads to the ETag observed above (unless caller set one).
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
// Re-target the client at the observed version unless it is already version-scoped.
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(
new UnsupportedOperationException("Concurrency control type not supported."));
}
// Stream the requested range in chunkSize pieces under the pinned conditions.
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | BlobProperties properties = getProperties(); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Normalize inputs: fall back to default options and ETag-based consistent-read control.
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
// Fetch properties under the caller's request conditions so the observed ETag/version
// already reflects those conditions.
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Default chunk size is 4 MB per downloaded block.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
// No pinning requested; reads may observe concurrent modifications.
break;
case ETAG:
// Pin subsequent range reads to the ETag observed above (unless caller set one).
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
// Re-target the client at the observed version unless it is already version-scoped.
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
// Stream the requested range in chunkSize pieces under the pinned conditions.
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
private UnsupportedOperationException generateNoneException(String wrongValue, String toSet) {
return new UnsupportedOperationException(String.format("'%s' can not be set when 'concurrencyControl'"
+ " is set to NONE. Set 'concurrencyControl' to %s.", wrongValue, toSet));
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of the blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional parameters default to null; only the source and poll interval are supplied.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of the blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Bundle the individual parameters into the options overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of the blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the polling logic; expose its synchronous poller view.
    PollerFlux<BlobCopyInfo, Void> asyncPoller = client.beginCopy(options);
    return asyncPoller.getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * @param copyId The id of the copy operation to abort.
 */
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout, default context; the response is intentionally discarded.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> abortOperation = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortOperation, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public String copyFromUrl(String copySource) {
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Fold the individual parameters into the options-based overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    // Full-blob download: no range, no retry options, no conditions, no MD5, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Drain the async body into the caller's stream, then surface the HTTP response metadata.
    Mono<BlobDownloadResponse> downloadOperation = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue()
            .reduce(stream, (os, byteBuffer) -> {
                try {
                    os.write(FluxUtil.byteBufferToArray(byteBuffer));
                    return os;
                } catch (IOException ex) {
                    // Wrap the checked IOException so it can propagate through the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            })
            .thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(downloadOperation, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    // Default behavior refuses to overwrite an existing file.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // Replace the file's contents if it already exists; otherwise create it.
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        openOptions = overwriteOptions;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with null OpenOptions, i.e. the default create-must-not-exist behavior.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-flavored transfer options to the common type, applying library defaults.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 */
public void delete() {
    // Delete with no snapshot option, no conditions, and no timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    Mono<Response<Void>> deleteOperation =
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
    return blockWithOptionalTimeout(deleteOperation, timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    // No request conditions, no timeout, default context.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    Mono<Response<Void>> setHeadersOperation =
        client.setHttpHeadersWithResponse(headers, requestConditions, context);
    return blockWithOptionalTimeout(setHeadersOperation, timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
 *
 * @param metadata Metadata to associate with the blob.
 */
public void setMetadata(Map<String, String> metadata) {
    // No request conditions, no timeout, default context.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
 *
 * @param metadata Metadata to associate with the blob.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    Response<Map<String, String>> response =
        this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    // The response is intentionally discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
 *
 * @return A {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public BlobClientBase createSnapshot() {
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
 *
 * @param metadata Metadata to associate with the blob snapshot.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous BlobClientBase before handing it back.
    Mono<Response<BlobClientBase>> snapshotOperation = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotOperation, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
 *
 * @param tier The new tier for the blob.
 */
public void setAccessTier(AccessTier tier) {
    // No rehydrate priority, no lease, no timeout, default context.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    Mono<Response<Void>> setTierOperation = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(setTierOperation, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
 */
public void undelete() {
    // No timeout, default context; the response is intentionally discarded.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Block on the async undelete, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
public StorageAccountInfo getAccountInfo() {
    // Convenience overload: no timeout, default pipeline context.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Block on the async account-info call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* @see BlobServiceClient
* user delegation key.
* @return A {@code String} representing all SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local (signing); delegate to the async client's implementation.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing all SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local (signing); delegate to the async client's implementation.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Wrap the expression into the options object and use the WithResponse variant.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Start the async query and block (no timeout parameter on this API).
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the reactive body as a lazily-consumed InputStream.
    InputStream bodyStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), bodyStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Convenience overload: bundle stream + expression into options, no timeout, default context.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Run the async query, copying each emitted buffer into the caller-supplied stream,
    // then surface the original response metadata as a BlobQueryResponse.
    Mono<BlobQueryResponse> operation = client.queryWithResponse(queryOptions, context)
        .flatMap(rawResponse -> rawResponse.getValue()
            .reduce(queryOptions.getOutputStream(), (os, byteBuffer) -> {
                try {
                    os.write(FluxUtil.byteBufferToArray(byteBuffer));
                    return os;
                } catch (IOException ex) {
                    // Wrap checked IO failures so they can propagate through the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            })
            .thenReturn(new BlobQueryResponse(rawResponse)));
    return blockWithOptionalTimeout(operation, timeout);
}
}

class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; every synchronous method delegates to it and blocks on the result.
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // Store the async client; all sync operations are blocking wrappers over it.
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the snapshot-scoped async client in a new sync facade.
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the version-scoped async client in a new sync facade.
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    return client.getBlobUrl(); // direct pass-through to the async client
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    return client.getAccountName(); // direct pass-through to the async client
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    return client.getContainerName(); // direct pass-through to the async client
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Build a synchronous container client from the async client's container builder.
    return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    return client.getBlobName(); // direct pass-through to the async client
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    return client.getHttpPipeline(); // direct pass-through to the async client
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    return client.getCustomerProvidedKey(); // direct pass-through to the async client
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
// Package-private: exposed to sibling classes in this package only.
String getEncryptionScope() {
    return client.getEncryptionScope(); // direct pass-through to the async client
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    return client.getServiceVersion(); // direct pass-through to the async client
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    return client.getSnapshotId(); // direct pass-through to the async client
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    return client.getVersionId(); // direct pass-through to the async client
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    return client.isSnapshot(); // direct pass-through to the async client
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // Convenience overload: full blob, no access conditions.
    return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Bundles the legacy parameters into the options object.
    // NOTE(review): the openInputStream(BlobInputStreamOptions) overload this calls is not visible
    // in this chunk (only its orphaned Javadoc appears below) -- confirm it exists in the full file.
    return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
    // Convenience overload: no timeout, default pipeline context.
    return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Convenience overload: metadata, tier, priority and both request conditions default to null.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the legacy parameter list into the options object and delegate.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Wrap the async long-running operation in a synchronous poller.
    return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
    // Convenience overload: no lease, no timeout, default pipeline context.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Start the async abort, then block, honoring the optional timeout.
    Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
    // Convenience overload: all optional parameters null, default pipeline context.
    return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the legacy parameter list into the options object and delegate.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async synchronous-copy call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void download(OutputStream stream) {
    // Convenience overload: whole blob, no retry options, no conditions, no range MD5.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Run the async download, writing each emitted buffer into the caller-supplied stream,
    // then surface the original response metadata as a BlobDownloadResponse.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap checked IO failures so they can propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath) {
    // Convenience overload: never overwrite an existing file.
    return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> overwriteOptions = null;
    if (overwrite) {
        // Replace the default create-new behavior so an existing file is truncated instead of
        // triggering a FileAlreadyExistsException.
        overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
    }
    Response<BlobProperties> response = downloadToFileWithResponse(filePath, null, null, null, null, false,
        overwriteOptions, null, Context.NONE);
    return response.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with null openOptions, i.e. the default create-new file semantics.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Apply defaults to the blob-specific transfer options and convert to the common type.
    final com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async file download, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 */
public void delete() {
    // Convenience overload: no snapshot options, no conditions, no timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    Mono<Response<Void>> response = client
        .deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    Mono<Response<Void>> response = client
        .setHttpHeadersWithResponse(headers, requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
public void setMetadata(Map<String, String> metadata) {
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
 *
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier for the snapshot.
 */
public BlobClientBase createSnapshot() {
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier for the snapshot.
 */
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous BlobClientBase before returning.
    Mono<Response<BlobClientBase>> response = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param tier The new tier for the blob.
 */
public void setAccessTier(AccessTier tier) {
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual arguments into an options bag and forward to the options-based overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 */
public void undelete() {
    undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    Mono<Response<Void>> response = client.undeleteWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @return The sku name and account kind.
 */
public StorageAccountInfo getAccountInfo() {
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * See {@link BlobServiceClient#getUserDelegationKey(OffsetDateTime, OffsetDateTime)} for more information on
 * how to get a user delegation key.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * See {@link BlobServiceClient#getUserDelegationKey(OffsetDateTime, OffsetDateTime)} for more information on
 * how to get a user delegation key.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 *
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 *
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
public InputStream openQueryInputStream(String expression) {
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block on the async query; a null response indicates an unexpected state rather than an empty result.
    final BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the reactive payload as a blocking InputStream for synchronous callers.
    final InputStream queryStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), queryStream, asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public void query(OutputStream stream, String expression) {
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the async byte-buffer stream into the caller-provided OutputStream, then block.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Checked IOException cannot cross the reactive boundary; propagate as unchecked.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
} |
Add `.disableAutoComplete()`. By default `autoComplete` is enabled, which means the SDK will internally `complete` the message for you, and the user does not need to call `messageContext.complete()`. But since we want to show how a user can disable this feature by calling `disableAutoComplete` and complete the message manually (as you do in the `processMessage` consumer), the call must be added. We had this bug in the readme and it was left over; since you are touching this specific part, you can fix this also. Thank you. | public void createServiceBusProcessorClient() {
    // Handler that settles each message manually; requires auto-complete to be disabled on the builder.
    Consumer<ServiceBusReceivedMessageContext> processMessage = messageContext -> {
        try {
            System.out.println(messageContext.getMessage().getMessageId());
            messageContext.complete();
        } catch (Exception ex) {
            messageContext.abandon();
        }
    };
    Consumer<ServiceBusErrorContext> processError = errorContext -> {
        System.err.println("Error occurred while receiving message: " + errorContext.getException());
    };
    ServiceBusProcessorClient processorClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .processor()
        .queueName("<< QUEUE NAME >>")
        .processMessage(processMessage)
        .processError(processError)
        // Messages are completed/abandoned manually in processMessage, so turn off auto-complete.
        .disableAutoComplete()
        .buildProcessorClient();
    processorClient.start();
} | .processMessage(processMessage) | public void createServiceBusProcessorClient() {
Consumer<ServiceBusReceivedMessageContext> processMessage = messageContext -> {
try {
System.out.println(messageContext.getMessage().getMessageId());
messageContext.complete();
} catch (Exception ex) {
messageContext.abandon();
}
};
Consumer<ServiceBusErrorContext> processError = errorContext -> {
System.err.println("Error occurred while receiving message: " + errorContext.getException());
};
ServiceBusProcessorClient processorClient = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.processor()
.queueName("<< QUEUE NAME >>")
.processMessage(processMessage)
.processError(processError)
.disableAutoComplete()
.buildProcessorClient();
processorClient.start();
} | class ReadmeSamples {
/**
* Code sample for creating an asynchronous Service Bus sender.
*/
public void createAsynchronousServiceBusSender() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.sender()
.queueName("<< QUEUE NAME >>")
.buildClient();
}
/**
* Code sample for creating an asynchronous Service Bus receiver.
*/
public void createAsynchronousServiceBusReceiver() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.receiver()
.topicName("<< TOPIC NAME >>")
.subscriptionName("<< SUBSCRIPTION NAME >>")
.buildAsyncClient();
}
/**
* Code sample for creating an asynchronous Service Bus receiver using {@link DefaultAzureCredentialBuilder}.
*/
public void createAsynchronousServiceBusReceiverWithAzureIdentity() {
TokenCredential credential = new DefaultAzureCredentialBuilder()
.build();
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.credential("<<fully-qualified-namespace>>", credential)
.receiver()
.queueName("<<queue-name>>")
.buildAsyncClient();
}
/**
* Sends messages to a queue.
*/
public void sendMessage() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.sender()
.queueName("<< QUEUE NAME >>")
.buildClient();
List<ServiceBusMessage> messages = Arrays.asList(
new ServiceBusMessage("Hello world").setMessageId("1"),
new ServiceBusMessage("Bonjour").setMessageId("2"));
sender.sendMessages(messages);
sender.close();
}
/**
* Receives messages from a topic and subscription.
*/
public void receiveMessages() {
ServiceBusReceiverClient receiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.receiver()
.topicName("<< TOPIC NAME >>")
.subscriptionName("<< SUBSCRIPTION NAME >>")
.buildClient();
IterableStream<ServiceBusReceivedMessage> messages = receiver.receiveMessages(10, Duration.ofSeconds(30));
messages.forEach(message -> {
System.out.printf("Id: %s. Contents: %s%n", message.getMessageId(),
message.getBody().toString());
});
receiver.close();
}
/**
* Receives messages asynchronously.
*/
public void receiveMessagesAsync() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.receiver()
.queueName("<< QUEUE NAME >>")
.buildAsyncClient();
Disposable subscription = receiver.receiveMessages().subscribe(message -> {
System.out.printf("Id: %s%n", message.getMessageId());
System.out.printf("Contents: %s%n", message.getBody().toString());
}, error -> {
System.err.println("Error occurred while receiving messages: " + error);
}, () -> {
System.out.println("Finished receiving messages.");
});
subscription.dispose();
receiver.close();
}
/**
* Complete a message.
*/
public void completeMessage() {
ServiceBusReceiverClient receiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.receiver()
.topicName("<< TOPIC NAME >>")
.subscriptionName("<< SUBSCRIPTION NAME >>")
.receiveMode(ServiceBusReceiveMode.PEEK_LOCK)
.buildClient();
receiver.receiveMessages(10).forEach(message -> {
System.out.println("Completing message " + message.getLockToken());
receiver.complete(message);
});
}
/**
* Create a session message.
*/
public void createSessionMessage() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.sender()
.queueName("<< QUEUE NAME >>")
.buildClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message);
}
/**
* Create session receiver for "greetings"
*/
public void namedSessionReceiver() {
ServiceBusSessionReceiverAsyncClient sessionReceiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.sessionReceiver()
.queueName("<< QUEUE NAME >>")
.buildAsyncClient();
Mono<ServiceBusReceiverAsyncClient> receiverAsyncClient = sessionReceiver.acceptSession("greetings");
}
/**
* Create session receiver for the first available session.
*/
public void unnamedSessionReceiver() {
ServiceBusSessionReceiverAsyncClient sessionReceiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.sessionReceiver()
.queueName("<< QUEUE NAME >>")
.buildAsyncClient();
Mono<ServiceBusReceiverAsyncClient> receiverAsyncClient = sessionReceiver.acceptNextSession();
}
/**
* Code sample for creating an synchronous Service Bus receiver to read message from dead-letter queue.
*/
public void createSynchronousServiceBusDeadLetterQueueReceiver() {
ServiceBusReceiverClient receiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.receiver()
.topicName("<< TOPIC NAME >>")
.subscriptionName("<< SUBSCRIPTION NAME >>")
.subQueue(SubQueue.DEAD_LETTER_QUEUE)
.buildClient();
}
/**
* Code sample for creating a Service Bus Processor Client.
*/
} | class ReadmeSamples {
/**
* Code sample for creating an asynchronous Service Bus sender.
*/
public void createAsynchronousServiceBusSender() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.sender()
.queueName("<< QUEUE NAME >>")
.buildClient();
}
/**
* Code sample for creating an asynchronous Service Bus receiver.
*/
public void createAsynchronousServiceBusReceiver() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.receiver()
.topicName("<< TOPIC NAME >>")
.subscriptionName("<< SUBSCRIPTION NAME >>")
.buildAsyncClient();
}
/**
* Code sample for creating an asynchronous Service Bus receiver using {@link DefaultAzureCredentialBuilder}.
*/
public void createAsynchronousServiceBusReceiverWithAzureIdentity() {
TokenCredential credential = new DefaultAzureCredentialBuilder()
.build();
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.credential("<<fully-qualified-namespace>>", credential)
.receiver()
.queueName("<<queue-name>>")
.buildAsyncClient();
}
/**
* Sends messages to a queue.
*/
public void sendMessage() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.sender()
.queueName("<< QUEUE NAME >>")
.buildClient();
List<ServiceBusMessage> messages = Arrays.asList(
new ServiceBusMessage("Hello world").setMessageId("1"),
new ServiceBusMessage("Bonjour").setMessageId("2"));
sender.sendMessages(messages);
sender.close();
}
/**
* Receives messages from a topic and subscription.
*/
public void receiveMessages() {
ServiceBusReceiverClient receiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.receiver()
.topicName("<< TOPIC NAME >>")
.subscriptionName("<< SUBSCRIPTION NAME >>")
.buildClient();
IterableStream<ServiceBusReceivedMessage> messages = receiver.receiveMessages(10, Duration.ofSeconds(30));
messages.forEach(message -> {
System.out.printf("Id: %s. Contents: %s%n", message.getMessageId(),
message.getBody().toString());
});
receiver.close();
}
/**
* Receives messages asynchronously.
*/
public void receiveMessagesAsync() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.receiver()
.queueName("<< QUEUE NAME >>")
.buildAsyncClient();
Disposable subscription = receiver.receiveMessages().subscribe(message -> {
System.out.printf("Id: %s%n", message.getMessageId());
System.out.printf("Contents: %s%n", message.getBody().toString());
}, error -> {
System.err.println("Error occurred while receiving messages: " + error);
}, () -> {
System.out.println("Finished receiving messages.");
});
subscription.dispose();
receiver.close();
}
/**
* Complete a message.
*/
public void completeMessage() {
ServiceBusReceiverClient receiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.receiver()
.topicName("<< TOPIC NAME >>")
.subscriptionName("<< SUBSCRIPTION NAME >>")
.receiveMode(ServiceBusReceiveMode.PEEK_LOCK)
.buildClient();
receiver.receiveMessages(10).forEach(message -> {
System.out.println("Completing message " + message.getLockToken());
receiver.complete(message);
});
}
/**
* Create a session message.
*/
public void createSessionMessage() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.sender()
.queueName("<< QUEUE NAME >>")
.buildClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message);
}
/**
* Create session receiver for "greetings"
*/
public void namedSessionReceiver() {
ServiceBusSessionReceiverAsyncClient sessionReceiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.sessionReceiver()
.queueName("<< QUEUE NAME >>")
.buildAsyncClient();
Mono<ServiceBusReceiverAsyncClient> receiverAsyncClient = sessionReceiver.acceptSession("greetings");
}
/**
* Create session receiver for the first available session.
*/
public void unnamedSessionReceiver() {
ServiceBusSessionReceiverAsyncClient sessionReceiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.sessionReceiver()
.queueName("<< QUEUE NAME >>")
.buildAsyncClient();
Mono<ServiceBusReceiverAsyncClient> receiverAsyncClient = sessionReceiver.acceptNextSession();
}
/**
* Code sample for creating an synchronous Service Bus receiver to read message from dead-letter queue.
*/
public void createSynchronousServiceBusDeadLetterQueueReceiver() {
ServiceBusReceiverClient receiver = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.receiver()
.topicName("<< TOPIC NAME >>")
.subscriptionName("<< SUBSCRIPTION NAME >>")
.subQueue(SubQueue.DEAD_LETTER_QUEUE)
.buildClient();
}
/**
* Code sample for creating a Service Bus Processor Client.
*/
} |
Updated | public BinaryData getData() {
if (event.getData() != null) {
return EventGridDeserializer.getData(event.getData());
}
return null;
} | return EventGridDeserializer.getData(event.getData()); | public BinaryData getData() {
return EventGridDeserializer.getData(event.getData());
} | class EventGridEvent {
private final com.azure.messaging.eventgrid.implementation.models.EventGridEvent event;
private static final ClientLogger logger = new ClientLogger(EventGridEvent.class);
/**
* Create a new instance of the EventGridEvent, with the given required fields.
* @param subject the subject of the event.
* @param eventType the type of the event, e.g. "Contoso.Items.ItemReceived".
* @param data the data associated with this event.
* @param dataVersion the version of the data sent along with the event.
*/
public EventGridEvent(String subject, String eventType, Object data, String dataVersion) {
if (CoreUtils.isNullOrEmpty(subject)) {
throw logger.logExceptionAsError(new IllegalArgumentException("subject cannot be null or empty"));
} else if (CoreUtils.isNullOrEmpty(eventType)) {
throw logger.logExceptionAsError(new IllegalArgumentException("event type cannot be null or empty"));
} else if (CoreUtils.isNullOrEmpty(dataVersion)) {
throw logger.logExceptionAsError(new IllegalArgumentException("data version cannot be null or empty"));
}
this.event = new com.azure.messaging.eventgrid.implementation.models.EventGridEvent()
.setEventTime(OffsetDateTime.now())
.setId(UUID.randomUUID().toString())
.setSubject(subject)
.setEventType(eventType)
.setData(data)
.setDataVersion(dataVersion);
}
/**
* Deserialize the {@link EventGridEvent} from a JSON string.
* @param eventGridJsonString the JSON payload containing one or more events.
*
* @return all of the events in the payload deserialized as {@link EventGridEvent}s.
* @throws IllegalArgumentException if the input parameter isn't a JSON string for a eventgrid event
* or an array of it.
*/
public static List<EventGridEvent> fromString(String eventGridJsonString) {
return EventGridDeserializer.deserializeEventGridEvents(eventGridJsonString);
}
/**
* Get the unique id associated with this event.
* @return the id.
*/
public String getId() {
return this.event.getId();
}
/**
* Set the unique id of the event. Note that a random id has already been set by default.
* @param id the unique id to set.
*
* @return the event itself.
*/
public EventGridEvent setId(String id) {
if (CoreUtils.isNullOrEmpty(id)) {
throw logger.logExceptionAsError(new IllegalArgumentException("id cannot be null or empty"));
}
this.event.setId(id);
return this;
}
/**
* Get the topic associated with this event if it is associated with a domain.
* @return the topic, or null if the topic is not set (i.e. the event came from or is going to a domain).
*/
public String getTopic() {
return this.event.getTopic();
}
/**
* Set the topic associated with this event. Used to route events from domain endpoints.
* @param topic the topic to set.
*
* @return the event itself.
*/
public EventGridEvent setTopic(String topic) {
this.event.setTopic(topic);
return this;
}
/**
* Get the subject associated with this event.
* @return the subject.
*/
public String getSubject() {
return this.event.getSubject();
}
/**
* Get whether this event is a system event.
* @see SystemEventNames
* @return {@code true} if the even is a system event, or {@code false} otherwise.
*/
public boolean isSystemEvent() {
String eventType = this.getEventType();
return SystemEventNames.getSystemEventMappings().containsKey(eventType);
}
/**
* Convert the event's data into the system event data if the event is a system event.
* @see SystemEventNames
* @return The system event if the event is a system event, or {@code null} if it's not.
*/
public Object asSystemEventData() {
if (event.getData() == null) {
return null;
}
return EventGridDeserializer.getSystemEventData(this.getData(), event.getEventType());
}
/**
* Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into
* a String, an Object, or a byte[].
* @return A {@link BinaryData} that wraps the this event's data payload.
*/
/**
* Get the type of this event.
* @return the event type.
*/
public String getEventType() {
return this.event.getEventType();
}
/**
* Get the time associated with the occurrence of this event.
* @return the event time.
*/
public OffsetDateTime getEventTime() {
return this.event.getEventTime();
}
/**
* Set the time associated with the event. Note that a default time has already been set when the event was
* constructed.
* @param time the time to set.
*
* @return the event itself.
*/
public EventGridEvent setEventTime(OffsetDateTime time) {
this.event.setEventTime(time);
return this;
}
/**
* Get the version of the data in the event. This can be used to specify versioning of event data schemas over time.
* @return the version of the event data.
*/
public String getDataVersion() {
return this.event.getDataVersion();
}
EventGridEvent(com.azure.messaging.eventgrid.implementation.models.EventGridEvent impl) {
this.event = impl;
}
com.azure.messaging.eventgrid.implementation.models.EventGridEvent toImpl() {
return this.event;
}
} | class EventGridEvent {
private final com.azure.messaging.eventgrid.implementation.models.EventGridEvent event;
private static final ClientLogger logger = new ClientLogger(EventGridEvent.class);
/**
* Create a new instance of the EventGridEvent, with the given required fields.
* @param subject the subject of the event.
* @param eventType the type of the event, e.g. "Contoso.Items.ItemReceived".
* @param data the data associated with this event.
* @param dataVersion the version of the data sent along with the event.
*
* @throws IllegalArgumentException if subject, eventType or data is {@code null} or empty.
*/
public EventGridEvent(String subject, String eventType, Object data, String dataVersion) {
if (CoreUtils.isNullOrEmpty(subject)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'subject' cannot be null or empty."));
} else if (CoreUtils.isNullOrEmpty(eventType)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'eventType' cannot be null or empty."));
} else if (CoreUtils.isNullOrEmpty(dataVersion)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'dataVersion' cannot be null or empty."));
}
this.event = new com.azure.messaging.eventgrid.implementation.models.EventGridEvent()
.setEventTime(OffsetDateTime.now())
.setId(UUID.randomUUID().toString())
.setSubject(subject)
.setEventType(eventType)
.setData(data)
.setDataVersion(dataVersion);
}
/**
* Deserialize the {@link EventGridEvent} from a JSON string.
* @param eventGridJsonString the JSON payload containing one or more events.
*
* @return all of the events in the payload deserialized as {@link EventGridEvent EventGridEvents}.
* @throws IllegalArgumentException if eventGridJsonString isn't a JSON string for a eventgrid event
* or an array of it.
* @throws NullPointerException if eventGridJsonString is {@code null}.
*/
public static List<EventGridEvent> fromString(String eventGridJsonString) {
return EventGridDeserializer.deserializeEventGridEvents(eventGridJsonString);
}
/**
* Get the unique id associated with this event.
* @return the id.
*/
public String getId() {
return this.event.getId();
}
/**
* Set the unique id of the event. Note that a random id has already been set by default.
* @param id the unique id to set.
*
* @return the event itself.
*/
public EventGridEvent setId(String id) {
if (CoreUtils.isNullOrEmpty(id)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'id' cannot be null or empty."));
}
this.event.setId(id);
return this;
}
/**
* Get the topic associated with this event if it is associated with a domain.
* @return the topic, or null if the topic is not set (i.e. the event came from or is going to a domain).
*/
public String getTopic() {
return this.event.getTopic();
}
/**
* Set the topic associated with this event. Used to route events from domain endpoints.
* @param topic the topic to set.
*
* @return the event itself.
*/
public EventGridEvent setTopic(String topic) {
this.event.setTopic(topic);
return this;
}
/**
* Get the subject associated with this event.
* @return the subject.
*/
public String getSubject() {
return this.event.getSubject();
}
/**
* Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into
* a String, an Object, or a byte[].
* @return A {@link BinaryData} that wraps the this event's data payload.
*/
/**
* Get the type of this event.
* @return the event type.
*/
public String getEventType() {
return this.event.getEventType();
}
/**
* Get the time associated with the occurrence of this event.
* @return the event time.
*/
public OffsetDateTime getEventTime() {
return this.event.getEventTime();
}
/**
* Set the time associated with the event. Note that a default time has already been set when the event was
* constructed.
* @param time the time to set.
*
* @return the event itself.
*/
public EventGridEvent setEventTime(OffsetDateTime time) {
this.event.setEventTime(time);
return this;
}
/**
* Get the version of the data in the event. This can be used to specify versioning of event data schemas over time.
* @return the version of the event data.
*/
public String getDataVersion() {
return this.event.getDataVersion();
}
EventGridEvent(com.azure.messaging.eventgrid.implementation.models.EventGridEvent impl) {
this.event = impl;
}
com.azure.messaging.eventgrid.implementation.models.EventGridEvent toImpl() {
return this.event;
}
} |
Resolved | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConcurrencyControl concurrencyControl = options.getConcurrencyControl() == null ? ConcurrencyControl.E_TAG
: options.getConcurrencyControl();
BlobProperties properties = getProperties();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (concurrencyControl) {
case NONE:
if (requestConditions.getIfMatch() != null) {
throw logger.logExceptionAsError(generateNoneException("requestConditions.ifMatch", "E_TAG"));
}
if (this.client.getVersionId() != null) {
throw logger.logExceptionAsError(generateNoneException("client.versionId", "VERSION_ID"));
}
break;
case E_TAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(
new UnsupportedOperationException("Concurrency control type not supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | client = this.client.getVersionClient(versionId); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobProperties properties = getPropertiesWithResponse(options.getRequestConditions(), null, null).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize,
requestConditions, properties);
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
private UnsupportedOperationException generateNoneException(String wrongValue, String toSet) {
return new UnsupportedOperationException(String.format("'%s' can not be set when 'concurrencyControl'"
+ " is set to NONE. Set 'concurrencyControl' to %s.", wrongValue, toSet));
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Start the async copy and block for its completion, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}

/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body (a stream of ByteBuffers) into the caller's OutputStream; an IOException raised
    // while writing is wrapped as an UncheckedIOException and propagated through the reactive pipeline.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    // Block until the download completes, honoring the optional timeout.
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    return downloadToFile(filePath, false);
}

/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // A null openOptions set means "use the default open behavior", which fails if the file already exists.
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // Overwrite: truncate any existing file instead of throwing FileAlreadyExistsException.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegates with null openOptions, i.e. the default create-new-file behavior.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options (applying defaults) into the common type first.
    final com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Run the async download and block for the result, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 */
public void delete() {
    deleteWithResponse(null, null, null, Context.NONE);
}

/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block for completion, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}

/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block for the result, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}

/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block for completion, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the blob.
 */
public void setMetadata(Map<String, String> metadata) {
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}

/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the blob.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block for completion, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}

/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block for the result, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}

/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block for completion, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
 *
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier for the snapshot.
 */
public BlobClientBase createSnapshot() {
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}

/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the blob snapshot.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier for the snapshot.
 */
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async client returned for the snapshot in a synchronous BlobClientBase before blocking.
    Mono<Response<BlobClientBase>> response = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param tier The new tier for the blob.
 */
public void setAccessTier(AccessTier tier) {
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}

/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options type and delegate to the options-based overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client and block for completion, bounded by the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 */
public void undelete() {
    undeleteWithResponse(null, Context.NONE);
}

/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block for completion, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @return The sku name and account kind.
 */
public StorageAccountInfo getAccountInfo() {
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}

/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block for the result, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @see BlobServiceClient for more information on how to obtain a user delegation key.
 * @return A {@code String} representing all SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}

/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
 * Note : The client must be authenticated via {@link StorageSharedKeyCredential}
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 *
 * @return A {@code String} representing all SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
public InputStream openQueryInputStream(String expression) {
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}

/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block for the query response headers; the body itself is exposed lazily through FluxInputStream.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    InputStream bodyStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), bodyStream, asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public void query(OutputStream stream, String expression) {
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}

/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code queryOptions} or its output stream is null.
 */
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the async query result (a stream of ByteBuffers) into the caller-supplied OutputStream; an
    // IOException raised while writing is wrapped as an UncheckedIOException and propagated reactively.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    // Block until the query completes, honoring the optional timeout.
    return blockWithOptionalTimeout(download, timeout);
}
} | class BlobClientBase {
// Logger scoped to this class for surfacing client-side failures.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every synchronous method delegates to it and blocks for the result.
private final BlobAsyncClientBase client;

/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}

/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container this blob resides in.
 */
public final String getContainerName() {
    return client.getContainerName();
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Build a fresh sync container client from the async client's builder.
    BlobContainerClientBuilder containerBuilder = client.getContainerClientBuilder();
    return containerBuilder.buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline used for all service requests issued by this client.
 */
public HttpPipeline getHttpPipeline() {
    return client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption, or {@code null} if none was configured.
 */
public CpkInfo getCustomerProvidedKey() {
    return client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 * Package-private: exposed for internal use by sibling specialized clients.
 *
 * @return the encryption scope used for encryption, or {@code null} if none was configured.
 */
String getEncryptionScope() {
    return client.getEncryptionScope();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob, or {@code null} if this
 * client does not point at a snapshot.
 */
public String getSnapshotId() {
    return client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the blob, or {@code null} if this client does
 * not point at a specific version.
 */
public String getVersionId() {
    return client.getVersionId();
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot.
 */
public boolean isSnapshot() {
    return client.isSnapshot();
}
/**
 * Opens a blob input stream to download the entire blob.
 * <p>
 * Convenience overload that applies no range and no request conditions.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Bundle the legacy parameters into the options-bag overload.
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
// NOTE(review): the Javadoc above documents an openInputStream(BlobInputStreamOptions) overload whose
// implementation is not present in this chunk — confirm the method body against the full source file.
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Boolean exists() {
    return existsWithResponse(null, Context.NONE).getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Delegate with every optional parameter left unset.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the legacy parameter list into the options-bag overload.
    BlobBeginCopyOptions options = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(options);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 */
public void abortCopyFromUrl(String copyId) {
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> abort = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abort, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public String copyFromUrl(String copySource) {
    return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the legacy parameter list into the options-bag overload.
    BlobCopyFromUrlOptions options = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(options, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void download(OutputStream stream) {
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Fold each buffer of the async download body into the caller's stream as it arrives,
    // then surface the original response wrapped as the sync response type.
    Mono<BlobDownloadResponse> download = client
        .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Rethrow as unchecked so the reactive pipeline terminates with the error.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath) {
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    if (!overwrite) {
        // Null open options keep the default create-new behavior (fails if the file exists).
        return downloadToFileWithResponse(filePath, null, null, null, null, false, null, null, Context.NONE)
            .getValue();
    }
    // Overwrite: open for read/write, creating the file if needed and truncating any existing content.
    Set<OpenOption> openOptions = new HashSet<>();
    openOptions.add(StandardOpenOption.CREATE);
    openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    openOptions.add(StandardOpenOption.READ);
    openOptions.add(StandardOpenOption.WRITE);
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with null openOptions to preserve the default create-new semantics.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options to the common type expected by the options bag.
    final com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 */
public void delete() {
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @return The blob properties and metadata.
 */
public BlobProperties getProperties() {
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
 *
 * @param headers {@link BlobHttpHeaders}
 */
public void setHttpHeaders(BlobHttpHeaders headers) {
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
public void setMetadata(Map<String, String> metadata) {
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
 *
 * @return The blob's tags.
 */
public Map<String, String> getTags() {
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
 *
 * @param tags Tags to associate with the blob.
 */
public void setTags(Map<String, String> tags) {
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
 *
 * @return A {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public BlobClientBase createSnapshot() {
    // No metadata, request conditions, or timeout; unwrap the value from the full response.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
 */
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous BlobClientBase before blocking.
    Mono<Response<BlobClientBase>> snapshot = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshot, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob
 * in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth
 * of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param tier The new tier for the blob.
 */
public void setAccessTier(AccessTier tier) {
    // No rehydrate priority, lease id, timeout, or custom context.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob
 * in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth
 * of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual arguments into the options object used by the canonical overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob
 * in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth
 * of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 */
public void undelete() {
    // No timeout, empty pipeline context; the full response is intentionally discarded.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @return The sku name and account kind.
 */
public StorageAccountInfo getAccountInfo() {
    // No timeout, empty pipeline context; unwrap the value from the full response.
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values; obtained from
 * the blob service client's user-delegation-key API.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Signing happens locally; simply forward to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values; obtained from
 * the blob service client's user-delegation-key API.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Signing happens locally; simply forward to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Signing happens locally; simply forward to the async client's implementation.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Signing happens locally; simply forward to the async client's implementation.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
public InputStream openQueryInputStream(String expression) {
    // Build default options from the bare expression and unwrap the stream from the full response.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block on the async query; a null response indicates an unexpected internal state.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the reactive body into a blocking InputStream while preserving the response metadata.
    InputStream bodyStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), bodyStream, asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public void query(OutputStream stream, String expression) {
    // Build options carrying the destination stream; no timeout, empty pipeline context.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
// Both the options object and its destination output stream are required.
StorageImplUtils.assertNotNull("options", queryOptions)
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
// Drain the async body buffers into the caller-supplied stream, then surface the response metadata.
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Wrap the checked IOException so it can propagate through the reactive pipeline.
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
} |
I used `disableAutoComplete` in a project to fix exactly the issue you wrote. I added the change to the pull request. | public void createServiceBusProcessorClient() {
Consumer<ServiceBusReceivedMessageContext> processMessage = messageContext -> {
try {
System.out.println(messageContext.getMessage().getMessageId());
messageContext.complete();
} catch (Exception ex) {
messageContext.abandon();
}
};
Consumer<ServiceBusErrorContext> processError = errorContext -> {
System.err.println("Error occurred while receiving message: " + errorContext.getException());
};
ServiceBusProcessorClient processorClient = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.processor()
.queueName("<< QUEUE NAME >>")
.processMessage(processMessage)
.processError(processError)
.buildProcessorClient();
processorClient.start();
} | .processMessage(processMessage) | public void createServiceBusProcessorClient() {
Consumer<ServiceBusReceivedMessageContext> processMessage = messageContext -> {
try {
System.out.println(messageContext.getMessage().getMessageId());
messageContext.complete();
} catch (Exception ex) {
messageContext.abandon();
}
};
Consumer<ServiceBusErrorContext> processError = errorContext -> {
System.err.println("Error occurred while receiving message: " + errorContext.getException());
};
ServiceBusProcessorClient processorClient = new ServiceBusClientBuilder()
.connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
.processor()
.queueName("<< QUEUE NAME >>")
.processMessage(processMessage)
.processError(processError)
.disableAutoComplete()
.buildProcessorClient();
processorClient.start();
} | class ReadmeSamples {
/**
 * Code sample for creating a Service Bus sender client.
 * <p>NOTE(review): despite the method name, {@code buildClient()} returns the synchronous
 * {@link ServiceBusSenderClient}; {@code buildAsyncClient()} would be needed for an async sender — confirm
 * which variant this sample is meant to show.</p>
 */
public void createAsynchronousServiceBusSender() {
    ServiceBusSenderClient senderClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .sender()
        .queueName("<< QUEUE NAME >>")
        .buildClient();
}
/**
 * Code sample for creating an asynchronous Service Bus receiver on a topic subscription.
 */
public void createAsynchronousServiceBusReceiver() {
    ServiceBusReceiverAsyncClient receiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .receiver()
        .topicName("<< TOPIC NAME >>")
        .subscriptionName("<< SUBSCRIPTION NAME >>")
        .buildAsyncClient();
}
/**
 * Code sample for creating an asynchronous Service Bus receiver that authenticates with a
 * {@link DefaultAzureCredentialBuilder} credential instead of a connection string.
 */
public void createAsynchronousServiceBusReceiverWithAzureIdentity() {
    TokenCredential tokenCredential = new DefaultAzureCredentialBuilder()
        .build();
    ServiceBusReceiverAsyncClient receiverClient = new ServiceBusClientBuilder()
        .credential("<<fully-qualified-namespace>>", tokenCredential)
        .receiver()
        .queueName("<<queue-name>>")
        .buildAsyncClient();
}
/**
 * Sends a small batch of messages to a queue, then closes the sender.
 */
public void sendMessage() {
    ServiceBusSenderClient senderClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .sender()
        .queueName("<< QUEUE NAME >>")
        .buildClient();
    List<ServiceBusMessage> batch = Arrays.asList(
        new ServiceBusMessage("Hello world").setMessageId("1"),
        new ServiceBusMessage("Bonjour").setMessageId("2"));
    senderClient.sendMessages(batch);
    senderClient.close();
}
/**
 * Synchronously receives up to 10 messages from a topic subscription, waiting at most 30 seconds.
 */
public void receiveMessages() {
    ServiceBusReceiverClient receiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .receiver()
        .topicName("<< TOPIC NAME >>")
        .subscriptionName("<< SUBSCRIPTION NAME >>")
        .buildClient();
    IterableStream<ServiceBusReceivedMessage> received = receiverClient.receiveMessages(10, Duration.ofSeconds(30));
    received.forEach(message -> {
        System.out.printf("Id: %s. Contents: %s%n", message.getMessageId(),
            message.getBody().toString());
    });
    receiverClient.close();
}
/**
 * Receives messages asynchronously via a reactive subscription.
 * <p>NOTE(review): disposing the subscription immediately after subscribing cancels it right away;
 * a real application keeps the {@link Disposable} until it has finished receiving.</p>
 */
public void receiveMessagesAsync() {
    ServiceBusReceiverAsyncClient receiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .receiver()
        .queueName("<< QUEUE NAME >>")
        .buildAsyncClient();
    Disposable messageSubscription = receiverClient.receiveMessages().subscribe(message -> {
        System.out.printf("Id: %s%n", message.getMessageId());
        System.out.printf("Contents: %s%n", message.getBody().toString());
    }, error -> {
        System.err.println("Error occurred while receiving messages: " + error);
    }, () -> {
        System.out.println("Finished receiving messages.");
    });
    messageSubscription.dispose();
    receiverClient.close();
}
/**
 * Receives messages in PEEK_LOCK mode and settles each one by completing it.
 */
public void completeMessage() {
    ServiceBusReceiverClient receiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .receiver()
        .topicName("<< TOPIC NAME >>")
        .subscriptionName("<< SUBSCRIPTION NAME >>")
        .receiveMode(ServiceBusReceiveMode.PEEK_LOCK)
        .buildClient();
    receiverClient.receiveMessages(10).forEach(message -> {
        System.out.println("Completing message " + message.getLockToken());
        receiverClient.complete(message);
    });
}
/**
 * Sends a message tagged with a session id so it is routed to the "greetings" session.
 */
public void createSessionMessage() {
    ServiceBusSenderClient senderClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .sender()
        .queueName("<< QUEUE NAME >>")
        .buildClient();
    ServiceBusMessage sessionMessage = new ServiceBusMessage("Hello world")
        .setSessionId("greetings");
    senderClient.sendMessage(sessionMessage);
}
/**
 * Accepts the specific session named "greetings" on a session-enabled queue.
 */
public void namedSessionReceiver() {
    ServiceBusSessionReceiverAsyncClient sessionReceiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .sessionReceiver()
        .queueName("<< QUEUE NAME >>")
        .buildAsyncClient();
    Mono<ServiceBusReceiverAsyncClient> sessionClient = sessionReceiverClient.acceptSession("greetings");
}
/**
 * Accepts the first available session on a session-enabled queue.
 */
public void unnamedSessionReceiver() {
    ServiceBusSessionReceiverAsyncClient sessionReceiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .sessionReceiver()
        .queueName("<< QUEUE NAME >>")
        .buildAsyncClient();
    Mono<ServiceBusReceiverAsyncClient> sessionClient = sessionReceiverClient.acceptNextSession();
}
/**
 * Code sample for creating a synchronous Service Bus receiver that reads from the subscription's
 * dead-letter queue.
 */
public void createSynchronousServiceBusDeadLetterQueueReceiver() {
    ServiceBusReceiverClient deadLetterReceiver = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .receiver()
        .topicName("<< TOPIC NAME >>")
        .subscriptionName("<< SUBSCRIPTION NAME >>")
        .subQueue(SubQueue.DEAD_LETTER_QUEUE)
        .buildClient();
}
/**
* Code sample for creating a Service Bus Processor Client.
*/
} | class ReadmeSamples {
/**
 * Code sample for creating a Service Bus sender client.
 * <p>NOTE(review): despite the method name, {@code buildClient()} returns the synchronous
 * {@link ServiceBusSenderClient}; {@code buildAsyncClient()} would be needed for an async sender — confirm
 * which variant this sample is meant to show.</p>
 */
public void createAsynchronousServiceBusSender() {
    ServiceBusSenderClient senderClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .sender()
        .queueName("<< QUEUE NAME >>")
        .buildClient();
}
/**
 * Code sample for creating an asynchronous Service Bus receiver on a topic subscription.
 */
public void createAsynchronousServiceBusReceiver() {
    ServiceBusReceiverAsyncClient receiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .receiver()
        .topicName("<< TOPIC NAME >>")
        .subscriptionName("<< SUBSCRIPTION NAME >>")
        .buildAsyncClient();
}
/**
 * Code sample for creating an asynchronous Service Bus receiver that authenticates with a
 * {@link DefaultAzureCredentialBuilder} credential instead of a connection string.
 */
public void createAsynchronousServiceBusReceiverWithAzureIdentity() {
    TokenCredential tokenCredential = new DefaultAzureCredentialBuilder()
        .build();
    ServiceBusReceiverAsyncClient receiverClient = new ServiceBusClientBuilder()
        .credential("<<fully-qualified-namespace>>", tokenCredential)
        .receiver()
        .queueName("<<queue-name>>")
        .buildAsyncClient();
}
/**
 * Sends a small batch of messages to a queue, then closes the sender.
 */
public void sendMessage() {
    ServiceBusSenderClient senderClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .sender()
        .queueName("<< QUEUE NAME >>")
        .buildClient();
    List<ServiceBusMessage> batch = Arrays.asList(
        new ServiceBusMessage("Hello world").setMessageId("1"),
        new ServiceBusMessage("Bonjour").setMessageId("2"));
    senderClient.sendMessages(batch);
    senderClient.close();
}
/**
 * Synchronously receives up to 10 messages from a topic subscription, waiting at most 30 seconds.
 */
public void receiveMessages() {
    ServiceBusReceiverClient receiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .receiver()
        .topicName("<< TOPIC NAME >>")
        .subscriptionName("<< SUBSCRIPTION NAME >>")
        .buildClient();
    IterableStream<ServiceBusReceivedMessage> received = receiverClient.receiveMessages(10, Duration.ofSeconds(30));
    received.forEach(message -> {
        System.out.printf("Id: %s. Contents: %s%n", message.getMessageId(),
            message.getBody().toString());
    });
    receiverClient.close();
}
/**
 * Receives messages asynchronously via a reactive subscription.
 * <p>NOTE(review): disposing the subscription immediately after subscribing cancels it right away;
 * a real application keeps the {@link Disposable} until it has finished receiving.</p>
 */
public void receiveMessagesAsync() {
    ServiceBusReceiverAsyncClient receiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .receiver()
        .queueName("<< QUEUE NAME >>")
        .buildAsyncClient();
    Disposable messageSubscription = receiverClient.receiveMessages().subscribe(message -> {
        System.out.printf("Id: %s%n", message.getMessageId());
        System.out.printf("Contents: %s%n", message.getBody().toString());
    }, error -> {
        System.err.println("Error occurred while receiving messages: " + error);
    }, () -> {
        System.out.println("Finished receiving messages.");
    });
    messageSubscription.dispose();
    receiverClient.close();
}
/**
 * Receives messages in PEEK_LOCK mode and settles each one by completing it.
 */
public void completeMessage() {
    ServiceBusReceiverClient receiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .receiver()
        .topicName("<< TOPIC NAME >>")
        .subscriptionName("<< SUBSCRIPTION NAME >>")
        .receiveMode(ServiceBusReceiveMode.PEEK_LOCK)
        .buildClient();
    receiverClient.receiveMessages(10).forEach(message -> {
        System.out.println("Completing message " + message.getLockToken());
        receiverClient.complete(message);
    });
}
/**
 * Sends a message tagged with a session id so it is routed to the "greetings" session.
 */
public void createSessionMessage() {
    ServiceBusSenderClient senderClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .sender()
        .queueName("<< QUEUE NAME >>")
        .buildClient();
    ServiceBusMessage sessionMessage = new ServiceBusMessage("Hello world")
        .setSessionId("greetings");
    senderClient.sendMessage(sessionMessage);
}
/**
 * Accepts the specific session named "greetings" on a session-enabled queue.
 */
public void namedSessionReceiver() {
    ServiceBusSessionReceiverAsyncClient sessionReceiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .sessionReceiver()
        .queueName("<< QUEUE NAME >>")
        .buildAsyncClient();
    Mono<ServiceBusReceiverAsyncClient> sessionClient = sessionReceiverClient.acceptSession("greetings");
}
/**
 * Accepts the first available session on a session-enabled queue.
 */
public void unnamedSessionReceiver() {
    ServiceBusSessionReceiverAsyncClient sessionReceiverClient = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .sessionReceiver()
        .queueName("<< QUEUE NAME >>")
        .buildAsyncClient();
    Mono<ServiceBusReceiverAsyncClient> sessionClient = sessionReceiverClient.acceptNextSession();
}
/**
 * Code sample for creating a synchronous Service Bus receiver that reads from the subscription's
 * dead-letter queue.
 */
public void createSynchronousServiceBusDeadLetterQueueReceiver() {
    ServiceBusReceiverClient deadLetterReceiver = new ServiceBusClientBuilder()
        .connectionString("<< CONNECTION STRING FOR THE SERVICE BUS NAMESPACE >>")
        .receiver()
        .topicName("<< TOPIC NAME >>")
        .subscriptionName("<< SUBSCRIPTION NAME >>")
        .subQueue(SubQueue.DEAD_LETTER_QUEUE)
        .buildClient();
}
/**
* Code sample for creating a Service Bus Processor Client.
*/
} |
@Blackbaud-EricSlater - why do we need an instance here? Since this is a stateless service, creating an instance here is redundant in my opinion, thoughts? It will unnecessarily create multiple instances of `IndexPolicyCompareService`, whereas in the case of a static method, all it needs is just the Class instance, which does not pose a risk of memory leak. | public static boolean policyNeedsUpdate(IndexingPolicy existingPolicy, IndexingPolicy newPolicy) {
return new IndexPolicyCompareService().needsUpdate(existingPolicy, newPolicy);
} | return new IndexPolicyCompareService().needsUpdate(existingPolicy, newPolicy); | public static boolean policyNeedsUpdate(IndexingPolicy existingPolicy, IndexingPolicy newPolicy) {
return !hasSameIncludedPaths(existingPolicy.getIncludedPaths(), newPolicy.getIncludedPaths())
|| !hasSameExcludedPaths(existingPolicy.getExcludedPaths(), newPolicy.getExcludedPaths())
|| !existingPolicy.getIndexingMode().equals(newPolicy.getIndexingMode())
|| !existingPolicy.isAutomatic().equals(newPolicy.isAutomatic());
} | class IndexPolicyCompareService {
/**
 * Reports whether the existing indexing policy differs from the desired one in any of:
 * included paths, excluded paths, indexing mode, or the automatic flag.
 */
private boolean needsUpdate(IndexingPolicy existingPolicy, IndexingPolicy newPolicy) {
    if (!hasSameIncludedPaths(existingPolicy.getIncludedPaths(), newPolicy.getIncludedPaths())) {
        return true;
    }
    if (!hasSameExcludedPaths(existingPolicy.getExcludedPaths(), newPolicy.getExcludedPaths())) {
        return true;
    }
    if (!existingPolicy.getIndexingMode().equals(newPolicy.getIndexingMode())) {
        return true;
    }
    return !existingPolicy.isAutomatic().equals(newPolicy.isAutomatic());
}
/**
 * Compares two included-path lists as unordered collections (element equality via
 * {@code IncludedPath#equals}). The lists are considered equal when they contain the same elements,
 * or when the only path present in the existing list but missing from the new one is the root path
 * "/*" — presumably the default the service adds automatically (TODO confirm).
 *
 * @param existingPaths included paths currently on the container's policy.
 * @param newPaths included paths from the desired policy.
 * @return true when the included paths require no update.
 */
private boolean hasSameIncludedPaths(List<IncludedPath> existingPaths, List<IncludedPath> newPaths) {
    // Paths on the existing policy that the new policy no longer contains.
    List<IncludedPath> removed = existingPaths.stream()
        .filter(element -> !newPaths.contains(element))
        .collect(Collectors.toList());
    // Paths the new policy adds that the existing policy lacks.
    List<IncludedPath> added = newPaths.stream()
        .filter(element -> !existingPaths.contains(element))
        .collect(Collectors.toList());
    if (removed.isEmpty() && added.isEmpty()) {
        return true;
    }
    // Tolerate only the root "/*" path so it does not trigger spurious policy updates.
    return added.isEmpty() && removed.size() == 1 && removed.get(0).getPath().equals("/*");
}
/**
 * Compares two excluded-path lists as unordered collections (element equality via
 * {@code ExcludedPath#equals}). The lists are considered equal when they contain the same elements,
 * or when the only path present in the existing list but missing from the new one is the etag path —
 * presumably the default exclusion the service adds automatically (TODO confirm).
 *
 * @param existingPaths excluded paths currently on the container's policy.
 * @param newPaths excluded paths from the desired policy.
 * @return true when the excluded paths require no update.
 */
private boolean hasSameExcludedPaths(List<ExcludedPath> existingPaths, List<ExcludedPath> newPaths) {
    // Paths on the existing policy that the new policy no longer contains.
    List<ExcludedPath> removed = existingPaths.stream()
        .filter(element -> !newPaths.contains(element))
        .collect(Collectors.toList());
    // Paths the new policy adds that the existing policy lacks.
    List<ExcludedPath> added = newPaths.stream()
        .filter(element -> !existingPaths.contains(element))
        .collect(Collectors.toList());
    if (removed.isEmpty() && added.isEmpty()) {
        return true;
    }
    // Tolerate only the service-added etag exclusion so it does not trigger spurious policy updates.
    return added.isEmpty() && removed.size() == 1 && removed.get(0).getPath().equals("/\"_etag\"/?");
}
} | class IndexPolicyCompareService {
/**
 * Compares two included-path lists as unordered collections (element equality via
 * {@code IncludedPath#equals}). The lists are considered equal when they contain the same elements,
 * or when the only path present in the existing list but missing from the new one is the root path
 * "/*" — presumably the default the service adds automatically (TODO confirm).
 *
 * @param existingPaths included paths currently on the container's policy.
 * @param newPaths included paths from the desired policy.
 * @return true when the included paths require no update.
 */
private static boolean hasSameIncludedPaths(List<IncludedPath> existingPaths, List<IncludedPath> newPaths) {
    // Paths on the existing policy that the new policy no longer contains.
    List<IncludedPath> removed = existingPaths.stream()
        .filter(element -> !newPaths.contains(element))
        .collect(Collectors.toList());
    // Paths the new policy adds that the existing policy lacks.
    List<IncludedPath> added = newPaths.stream()
        .filter(element -> !existingPaths.contains(element))
        .collect(Collectors.toList());
    if (removed.isEmpty() && added.isEmpty()) {
        return true;
    }
    // Tolerate only the root "/*" path so it does not trigger spurious policy updates.
    return added.isEmpty() && removed.size() == 1 && removed.get(0).getPath().equals("/*");
}
/**
 * Compares two excluded-path lists as unordered collections (element equality via
 * {@code ExcludedPath#equals}). The lists are considered equal when they contain the same elements,
 * or when the only path present in the existing list but missing from the new one is the etag path —
 * presumably the default exclusion the service adds automatically (TODO confirm).
 *
 * @param existingPaths excluded paths currently on the container's policy.
 * @param newPaths excluded paths from the desired policy.
 * @return true when the excluded paths require no update.
 */
private static boolean hasSameExcludedPaths(List<ExcludedPath> existingPaths, List<ExcludedPath> newPaths) {
    // Paths on the existing policy that the new policy no longer contains.
    List<ExcludedPath> removed = existingPaths.stream()
        .filter(element -> !newPaths.contains(element))
        .collect(Collectors.toList());
    // Paths the new policy adds that the existing policy lacks.
    List<ExcludedPath> added = newPaths.stream()
        .filter(element -> !existingPaths.contains(element))
        .collect(Collectors.toList());
    if (removed.isEmpty() && added.isEmpty()) {
        return true;
    }
    // Tolerate only the service-added etag exclusion so it does not trigger spurious policy updates.
    return added.isEmpty() && removed.size() == 1 && removed.get(0).getPath().equals("/\"_etag\"/?");
}
} |
@Blackbaud-EricSlater - can you please implement the above changes so we can get this PR in ? I am currently out on vacation, and won't be able to review after tomorrow. In case you are out too, we can merge it in January. | public static boolean policyNeedsUpdate(IndexingPolicy existingPolicy, IndexingPolicy newPolicy) {
return new IndexPolicyCompareService().needsUpdate(existingPolicy, newPolicy);
} | return new IndexPolicyCompareService().needsUpdate(existingPolicy, newPolicy); | public static boolean policyNeedsUpdate(IndexingPolicy existingPolicy, IndexingPolicy newPolicy) {
return !hasSameIncludedPaths(existingPolicy.getIncludedPaths(), newPolicy.getIncludedPaths())
|| !hasSameExcludedPaths(existingPolicy.getExcludedPaths(), newPolicy.getExcludedPaths())
|| !existingPolicy.getIndexingMode().equals(newPolicy.getIndexingMode())
|| !existingPolicy.isAutomatic().equals(newPolicy.isAutomatic());
} | class IndexPolicyCompareService {
/**
 * Reports whether the existing indexing policy differs from the desired one in any of:
 * included paths, excluded paths, indexing mode, or the automatic flag.
 */
private boolean needsUpdate(IndexingPolicy existingPolicy, IndexingPolicy newPolicy) {
    if (!hasSameIncludedPaths(existingPolicy.getIncludedPaths(), newPolicy.getIncludedPaths())) {
        return true;
    }
    if (!hasSameExcludedPaths(existingPolicy.getExcludedPaths(), newPolicy.getExcludedPaths())) {
        return true;
    }
    if (!existingPolicy.getIndexingMode().equals(newPolicy.getIndexingMode())) {
        return true;
    }
    return !existingPolicy.isAutomatic().equals(newPolicy.isAutomatic());
}
/**
 * Compares two included-path lists as unordered collections (element equality via
 * {@code IncludedPath#equals}). The lists are considered equal when they contain the same elements,
 * or when the only path present in the existing list but missing from the new one is the root path
 * "/*" — presumably the default the service adds automatically (TODO confirm).
 *
 * @param existingPaths included paths currently on the container's policy.
 * @param newPaths included paths from the desired policy.
 * @return true when the included paths require no update.
 */
private boolean hasSameIncludedPaths(List<IncludedPath> existingPaths, List<IncludedPath> newPaths) {
    // Paths on the existing policy that the new policy no longer contains.
    List<IncludedPath> removed = existingPaths.stream()
        .filter(element -> !newPaths.contains(element))
        .collect(Collectors.toList());
    // Paths the new policy adds that the existing policy lacks.
    List<IncludedPath> added = newPaths.stream()
        .filter(element -> !existingPaths.contains(element))
        .collect(Collectors.toList());
    if (removed.isEmpty() && added.isEmpty()) {
        return true;
    }
    // Tolerate only the root "/*" path so it does not trigger spurious policy updates.
    return added.isEmpty() && removed.size() == 1 && removed.get(0).getPath().equals("/*");
}
// Compares two excluded-path lists for effective equality. The lists match
// when they contain the same elements, or when the only extra element on the
// existing side is the "/\"_etag\"/?" path (presumably a service-added
// default — confirm against the Cosmos DB indexing-policy docs). Any path
// present only in the new policy counts as a difference.
private boolean hasSameExcludedPaths(List<ExcludedPath> existingPaths, List<ExcludedPath> newPaths) {
    // Elements only present on the existing side.
    List<ExcludedPath> existingListDiff = existingPaths.stream()
        .filter(element -> !newPaths.contains(element))
        .collect(Collectors.toList());
    // Elements only present on the new side.
    List<ExcludedPath> newListDiff = newPaths.stream()
        .filter(element -> !existingPaths.contains(element))
        .collect(Collectors.toList());
    // isEmpty() is the idiomatic, intent-revealing form of size() == 0.
    return (existingListDiff.isEmpty() && newListDiff.isEmpty())
        || (newListDiff.isEmpty()
        && existingListDiff.size() == 1
        && existingListDiff.get(0).getPath().equals("/\"_etag\"/?"));
}
} | class IndexPolicyCompareService {
// Compares two included-path lists for effective equality.
// Returns true when the lists contain the same elements, or when the lone
// extra entry on the existing side is the root path "/*" (presumably a
// service-added default — confirm against service behavior). Any path present
// only in the new policy means the policies differ.
private static boolean hasSameIncludedPaths(List<IncludedPath> existingPaths, List<IncludedPath> newPaths) {
    // Paths present in the existing policy but missing from the new one.
    List<IncludedPath> existingListDiff = existingPaths.stream()
        .filter(element -> !newPaths.contains(element))
        .collect(Collectors.toList());
    // Paths present in the new policy but missing from the existing one.
    List<IncludedPath> newListDiff = newPaths.stream()
        .filter(element -> !existingPaths.contains(element))
        .collect(Collectors.toList());
    return (existingListDiff.size() == 0 && newListDiff.size() == 0)
        || (newListDiff.size() == 0
        && existingListDiff.size() == 1
        && existingListDiff.get(0).getPath().equals("/*"));
}
// Compares two excluded-path lists for effective equality.
// Returns true when the lists contain the same elements, or when the lone
// extra entry on the existing side is "/\"_etag\"/?" (presumably a
// service-added default — confirm against service behavior). Any path present
// only in the new policy means the policies differ.
private static boolean hasSameExcludedPaths(List<ExcludedPath> existingPaths, List<ExcludedPath> newPaths) {
    // Paths present in the existing policy but missing from the new one.
    List<ExcludedPath> existingListDiff = existingPaths.stream()
        .filter(element -> !newPaths.contains(element))
        .collect(Collectors.toList());
    // Paths present in the new policy but missing from the existing one.
    List<ExcludedPath> newListDiff = newPaths.stream()
        .filter(element -> !existingPaths.contains(element))
        .collect(Collectors.toList());
    return (existingListDiff.size() == 0 && newListDiff.size() == 0)
        || (newListDiff.size() == 0
        && existingListDiff.size() == 1
        && existingListDiff.get(0).getPath().equals("/\"_etag\"/?"));
}
} |
fixed | public static boolean policyNeedsUpdate(IndexingPolicy existingPolicy, IndexingPolicy newPolicy) {
return new IndexPolicyCompareService().needsUpdate(existingPolicy, newPolicy);
} | return new IndexPolicyCompareService().needsUpdate(existingPolicy, newPolicy); | public static boolean policyNeedsUpdate(IndexingPolicy existingPolicy, IndexingPolicy newPolicy) {
return !hasSameIncludedPaths(existingPolicy.getIncludedPaths(), newPolicy.getIncludedPaths())
|| !hasSameExcludedPaths(existingPolicy.getExcludedPaths(), newPolicy.getExcludedPaths())
|| !existingPolicy.getIndexingMode().equals(newPolicy.getIndexingMode())
|| !existingPolicy.isAutomatic().equals(newPolicy.isAutomatic());
} | class IndexPolicyCompareService {
// Returns true when any tracked facet of the indexing policy differs between
// the existing and the desired policy: included paths, excluded paths,
// indexing mode, or the automatic flag.
private boolean needsUpdate(IndexingPolicy existingPolicy, IndexingPolicy newPolicy) {
    return !hasSameIncludedPaths(existingPolicy.getIncludedPaths(), newPolicy.getIncludedPaths())
        || !hasSameExcludedPaths(existingPolicy.getExcludedPaths(), newPolicy.getExcludedPaths())
        || !existingPolicy.getIndexingMode().equals(newPolicy.getIndexingMode())
        || !existingPolicy.isAutomatic().equals(newPolicy.isAutomatic());
}
// Decides whether two included-path lists are effectively equal: identical
// element sets, or identical except for a lone extra "/*" entry on the
// existing side.
private boolean hasSameIncludedPaths(List<IncludedPath> existingPaths, List<IncludedPath> newPaths) {
    // Any path that exists only in the new policy makes the lists unequal.
    boolean newHasExtraPaths = newPaths.stream()
        .anyMatch(path -> !existingPaths.contains(path));
    if (newHasExtraPaths) {
        return false;
    }
    List<IncludedPath> onlyInExisting = existingPaths.stream()
        .filter(path -> !newPaths.contains(path))
        .collect(Collectors.toList());
    return onlyInExisting.isEmpty()
        || (onlyInExisting.size() == 1 && onlyInExisting.get(0).getPath().equals("/*"));
}
// Decides whether two excluded-path lists are effectively equal: identical
// element sets, or identical except for a lone extra "/\"_etag\"/?" entry on
// the existing side.
private boolean hasSameExcludedPaths(List<ExcludedPath> existingPaths, List<ExcludedPath> newPaths) {
    // Any path that exists only in the new policy makes the lists unequal.
    boolean newHasExtraPaths = newPaths.stream()
        .anyMatch(path -> !existingPaths.contains(path));
    if (newHasExtraPaths) {
        return false;
    }
    List<ExcludedPath> onlyInExisting = existingPaths.stream()
        .filter(path -> !newPaths.contains(path))
        .collect(Collectors.toList());
    return onlyInExisting.isEmpty()
        || (onlyInExisting.size() == 1 && onlyInExisting.get(0).getPath().equals("/\"_etag\"/?"));
}
} | class IndexPolicyCompareService {
// Compares two included-path lists for effective equality; a lone extra "/*"
// entry on the existing side is tolerated (presumably the service's default
// root path — confirm against service behavior).
private static boolean hasSameIncludedPaths(List<IncludedPath> existingPaths, List<IncludedPath> newPaths) {
    // Paths present only in the existing policy.
    List<IncludedPath> existingListDiff = existingPaths.stream()
        .filter(element -> !newPaths.contains(element))
        .collect(Collectors.toList());
    // Paths present only in the new policy.
    List<IncludedPath> newListDiff = newPaths.stream()
        .filter(element -> !existingPaths.contains(element))
        .collect(Collectors.toList());
    return (existingListDiff.size() == 0 && newListDiff.size() == 0)
        || (newListDiff.size() == 0
        && existingListDiff.size() == 1
        && existingListDiff.get(0).getPath().equals("/*"));
}
// Compares two excluded-path lists for effective equality; a lone extra
// "/\"_etag\"/?" entry on the existing side is tolerated (presumably a
// service-added default — confirm against service behavior).
private static boolean hasSameExcludedPaths(List<ExcludedPath> existingPaths, List<ExcludedPath> newPaths) {
    // Paths present only in the existing policy.
    List<ExcludedPath> existingListDiff = existingPaths.stream()
        .filter(element -> !newPaths.contains(element))
        .collect(Collectors.toList());
    // Paths present only in the new policy.
    List<ExcludedPath> newListDiff = newPaths.stream()
        .filter(element -> !existingPaths.contains(element))
        .collect(Collectors.toList());
    return (existingListDiff.size() == 0 && newListDiff.size() == 0)
        || (newListDiff.size() == 0
        && existingListDiff.size() == 1
        && existingListDiff.get(0).getPath().equals("/\"_etag\"/?"));
}
} |
null check for `data` and `dataContentType` ? | public CloudEvent(String source, String type, byte[] data, String dataContentType) {
this(source, type);
this.setDataBase64(data, dataContentType);
} | this(source, type); | public CloudEvent(String source, String type, byte[] data, String dataContentType) {
this(source, type);
this.setDataBase64(data, dataContentType);
} | class CloudEvent {
private static final String SPEC_VERSION = "1.0";
private final com.azure.messaging.eventgrid.implementation.models.CloudEvent cloudEvent;
private static final ClientLogger logger = new ClientLogger(CloudEvent.class);
/**
* Create an instance of CloudEvent. The source and type are required fields to publish.
* @param source a URI identifying the origin of the event. It can't be null or empty.
* @param type the type of event, e.g. "Contoso.Items.ItemReceived". It can't be null or empty.
* @param data the payload of this event. Set to null if your event doesn't have the data payload.
* It will be serialized as a String if it's a String, or application/json if it's not a String.
*/
public CloudEvent(String source, String type, Object data) {
this(source, type);
this.setData(data);
}
/**
 * Shared initializer: validates the required source and type, then builds the
 * wire-format event with a freshly generated random id and the fixed spec
 * version.
 * NOTE(review): the previous Javadoc here documented {@code data} and
 * {@code dataContentType} parameters that this private constructor does not
 * take (they belong to the public byte[] overload); rewritten to match the
 * actual signature.
 * @param source a URI identifying the origin of the event. It can't be null or empty.
 * @param type the type of event, e.g. "Contoso.Items.ItemReceived". It can't be null or empty.
 * @throws IllegalArgumentException if source or type is null or empty.
 */
private CloudEvent(String source, String type) {
    if (CoreUtils.isNullOrEmpty(source)) {
        throw logger.logExceptionAsError(new IllegalArgumentException("Source cannot be null or empty"));
    }
    if (CoreUtils.isNullOrEmpty(type)) {
        throw logger.logExceptionAsError(new IllegalArgumentException("type cannot be null or empty"));
    }
    this.cloudEvent = new com.azure.messaging.eventgrid.implementation.models.CloudEvent()
        .setId(UUID.randomUUID().toString())
        .setSource(source)
        .setType(type)
        .setSpecversion(SPEC_VERSION);
}
/**
* Deserialize the {@link CloudEvent} from a JSON string.
* @param cloudEventJsonString the JSON payload containing one or more events.
*
* @return all of the events in the payload deserialized as {@link CloudEvent}s.
* @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it.
*/
public static List<CloudEvent> fromString(String cloudEventJsonString) {
return EventGridDeserializer.deserializeCloudEvents(cloudEventJsonString);
}
/**
* Get the id of the cloud event.
* @return the id.
*/
public String getId() {
return this.cloudEvent.getId();
}
/**
 * Set a custom id. Note that a random id is already set by default.
 * @param id the id to set. Must not be null or empty.
 *
 * @return the cloud event itself.
 * @throws IllegalArgumentException if id is null or empty.
 */
public CloudEvent setId(String id) {
    if (CoreUtils.isNullOrEmpty(id)) {
        // Log-and-throw, consistent with the validation style used in the
        // constructor (which routes through logger.logExceptionAsError).
        throw logger.logExceptionAsError(new IllegalArgumentException("id cannot be null or empty"));
    }
    this.cloudEvent.setId(id);
    return this;
}
/**
* Get the URI source of the event.
* @return the source.
*/
public String getSource() {
return this.cloudEvent.getSource();
}
/**
 * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into
 * a String, an Object, or a byte[].
 * @return A {@link BinaryData} that wraps this event's data payload.
 */
public BinaryData getData() {
    // A Base64 payload, when present, takes precedence over the JSON data field.
    if (cloudEvent.getDataBase64() != null) {
        return BinaryData.fromBytes(cloudEvent.getDataBase64());
    }
    return EventGridDeserializer.getData(cloudEvent.getData());
}
/**
* Set the data associated with this event.
* @param data the data to set.
*
* @return the cloud event itself.
*/
CloudEvent setData(Object data) {
this.cloudEvent.setData(data);
return this;
}
/**
 * Set the Base64 data associated with this event.
 * @param data the data to set; when null, the call is a silent no-op and
 * neither the payload nor the content type is recorded.
 * @param dataContentType the data content type of the CloudEvent.
 *
 * @return the cloud event itself.
 */
private CloudEvent setDataBase64(byte[] data, String dataContentType) {
    if (data != null) {
        // Base64-encode the raw payload for the wire format.
        byte[] encoded = Base64.getEncoder().encode(data);
        this.cloudEvent.setDataBase64(encoded);
        // The content type is only set when a payload is present.
        this.cloudEvent.setDatacontenttype(dataContentType);
    }
    return this;
}
/**
* Set the data content type with this event.
* @param dataContentType the data content type to set.
* @return the cloud event itself.
*/
public CloudEvent setDataContentType(String dataContentType) {
this.cloudEvent.setDatacontenttype(dataContentType);
return this;
}
/**
* Get the type of event, e.g. "Contoso.Items.ItemReceived".
* @return the type of the event.
*/
public String getType() {
return this.cloudEvent.getType();
}
/**
* Get the time associated with the occurrence of the event.
* @return the event time, or null if the time is not set.
*/
public OffsetDateTime getTime() {
return this.cloudEvent.getTime();
}
/**
* Set the time associated with the occurrence of the event.
* @param time the time to set.
*
* @return the cloud event itself.
*/
public CloudEvent setTime(OffsetDateTime time) {
this.cloudEvent.setTime(time);
return this;
}
/**
* Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the
* "application/json" type. Note that "application/json" is still a possible value for this field.
* @return the content type the data is in, or null if the data is nonexistent or in "application/json" format.
*/
public String getDataContentType() {
return this.cloudEvent.getDatacontenttype();
}
/**
* Get the schema that the data adheres to.
* @return a URI of the data schema, or null if it is not set.
*/
public String getDataSchema() {
return this.cloudEvent.getDataschema();
}
/**
* Set the schema that the data adheres to.
* @param dataSchema a URI identifying the schema of the data.
*
* @return the cloud event itself.
*/
public CloudEvent setDataSchema(String dataSchema) {
this.cloudEvent.setDataschema(dataSchema);
return this;
}
/**
* Get the subject associated with this event.
* @return the subject, or null if the subject was not set.
*/
public String getSubject() {
return this.cloudEvent.getSubject();
}
/**
* Set the subject of the event.
* @param subject the subject to set.
*
* @return the cloud event itself.
*/
public CloudEvent setSubject(String subject) {
this.cloudEvent.setSubject(subject);
return this;
}
/**
* Get a map of the additional user-defined attributes associated with this event.
* @return the extension attributes as an unmodifiable map.
*/
public Map<String, Object> getExtensionAttributes() {
if (this.cloudEvent.getAdditionalProperties() == null) {
return null;
}
return Collections.unmodifiableMap(this.cloudEvent.getAdditionalProperties());
}
/**
 * Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed
 * to lowercase and must not share a name with any reserved cloud event properties.
 * @param name the name of the attribute. Must not be null.
 * @param value the value to associate with the name.
 *
 * @return the cloud event itself.
 * @throws NullPointerException if name is null.
 */
public CloudEvent addExtensionAttribute(String name, Object value) {
    if (name == null) {
        // Fail fast with a clear, logged message instead of the anonymous NPE
        // that name.toLowerCase(...) would otherwise raise below.
        throw logger.logExceptionAsError(new NullPointerException("'name' cannot be null."));
    }
    if (this.cloudEvent.getAdditionalProperties() == null) {
        // Lazily create the backing map on first use.
        this.cloudEvent.setAdditionalProperties(new HashMap<>());
    }
    // Normalize the attribute name to lowercase with a locale-independent mapping.
    this.cloudEvent.getAdditionalProperties().put(name.toLowerCase(Locale.ENGLISH), value);
    return this;
}
CloudEvent(com.azure.messaging.eventgrid.implementation.models.CloudEvent impl) {
this.cloudEvent = impl;
}
com.azure.messaging.eventgrid.implementation.models.CloudEvent toImpl() {
return this.cloudEvent;
}
} | class CloudEvent {
private static final String SPEC_VERSION = "1.0";
private final com.azure.messaging.eventgrid.implementation.models.CloudEvent cloudEvent;
private static final ClientLogger logger = new ClientLogger(CloudEvent.class);
/**
* Create an instance of CloudEvent. The source and type are required fields to publish.
* @param source a URI identifying the origin of the event. It can't be null or empty.
* @param type the type of event, e.g. "Contoso.Items.ItemReceived". It can't be null or empty.
* @param data the payload of this event. Set to null if your event doesn't have the data payload.
* It will be serialized as a String if it's a String, or application/json if it's not a String.
* @throws NullPointerException if source or type is {@code null}.
*/
public CloudEvent(String source, String type, Object data) {
this(source, type);
this.setData(data);
}
/**
* Create an instance of CloudEvent. The source and type are required fields to publish.
* @param source a URI identifying the origin of the event.
* @param type the type of event, e.g. "Contoso.Items.ItemReceived".
* @param data the payload in bytes of this event. It will be serialized to Base64 format.
* @param dataContentType the type of the data.
* @throws NullPointerException if source or type is {@code null}.
*/
private CloudEvent(String source, String type) {
if (CoreUtils.isNullOrEmpty(source)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'source' cannot be null or empty."));
}
if (CoreUtils.isNullOrEmpty(type)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'type' cannot be null or empty."));
}
this.cloudEvent = new com.azure.messaging.eventgrid.implementation.models.CloudEvent()
.setId(UUID.randomUUID().toString())
.setSource(source)
.setType(type)
.setSpecversion(SPEC_VERSION);
}
/**
* Deserialize the {@link CloudEvent} from a JSON string.
* @param cloudEventJsonString the JSON payload containing one or more events.
*
* @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
* @throws IllegalArgumentException if cloudEventJsonString isn't a JSON string for a cloud event or an array of it.
* @throws NullPointerException if cloudEventJsonString is {@code null}.
*/
public static List<CloudEvent> fromString(String cloudEventJsonString) {
return EventGridDeserializer.deserializeCloudEvents(cloudEventJsonString);
}
/**
* Get the id of the cloud event.
* @return the id.
*/
public String getId() {
return this.cloudEvent.getId();
}
/**
 * Set a custom id. Note that a random id is already set by default.
 * @param id the id to set. Must not be null or empty.
 *
 * @return the cloud event itself.
 * @throws IllegalArgumentException if id is null or empty.
 */
public CloudEvent setId(String id) {
    if (CoreUtils.isNullOrEmpty(id)) {
        // Log-and-throw, consistent with the validation style used in the
        // constructor (which routes through logger.logExceptionAsError).
        throw logger.logExceptionAsError(new IllegalArgumentException("id cannot be null or empty"));
    }
    this.cloudEvent.setId(id);
    return this;
}
/**
* Get the URI source of the event.
* @return the source.
*/
public String getSource() {
return this.cloudEvent.getSource();
}
/**
* Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into
* a String, an Object, or a byte[].
* @return A {@link BinaryData} that wraps the this event's data payload.
*/
public BinaryData getData() {
if (cloudEvent.getDataBase64() != null) {
return BinaryData.fromBytes(cloudEvent.getDataBase64());
}
return EventGridDeserializer.getData(cloudEvent.getData());
}
/**
* Set the data associated with this event.
* @param data the data to set.
*
* @return the cloud event itself.
*/
CloudEvent setData(Object data) {
this.cloudEvent.setData(data);
return this;
}
/**
* Set the Base64 data associated with this event.
* @param data the data to set.
* @param dataContentType the data content type of the CloudEvent.
*
* @return the cloud event itself.
*/
private CloudEvent setDataBase64(byte[] data, String dataContentType) {
if (data != null) {
byte[] encoded = Base64.getEncoder().encode(data);
this.cloudEvent.setDataBase64(encoded);
this.cloudEvent.setDatacontenttype(dataContentType);
}
return this;
}
/**
* Set the data content type with this event.
* @param dataContentType the data content type to set.
* @return the cloud event itself.
*/
public CloudEvent setDataContentType(String dataContentType) {
this.cloudEvent.setDatacontenttype(dataContentType);
return this;
}
/**
* Get the type of event, e.g. "Contoso.Items.ItemReceived".
* @return the type of the event.
*/
public String getType() {
return this.cloudEvent.getType();
}
/**
* Get the time associated with the occurrence of the event.
* @return the event time, or null if the time is not set.
*/
public OffsetDateTime getTime() {
return this.cloudEvent.getTime();
}
/**
* Set the time associated with the occurrence of the event.
* @param time the time to set.
*
* @return the cloud event itself.
*/
public CloudEvent setTime(OffsetDateTime time) {
this.cloudEvent.setTime(time);
return this;
}
/**
* Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the
* "application/json" type. Note that "application/json" is still a possible value for this field.
* @return the content type the data is in, or null if the data is nonexistent or in "application/json" format.
*/
public String getDataContentType() {
return this.cloudEvent.getDatacontenttype();
}
/**
* Get the schema that the data adheres to.
* @return a URI of the data schema, or null if it is not set.
*/
public String getDataSchema() {
return this.cloudEvent.getDataschema();
}
/**
* Set the schema that the data adheres to.
* @param dataSchema a URI identifying the schema of the data.
*
* @return the cloud event itself.
*/
public CloudEvent setDataSchema(String dataSchema) {
this.cloudEvent.setDataschema(dataSchema);
return this;
}
/**
* Get the subject associated with this event.
* @return the subject, or null if the subject was not set.
*/
public String getSubject() {
return this.cloudEvent.getSubject();
}
/**
* Set the subject of the event.
* @param subject the subject to set.
*
* @return the cloud event itself.
*/
public CloudEvent setSubject(String subject) {
this.cloudEvent.setSubject(subject);
return this;
}
/**
* Get a map of the additional user-defined attributes associated with this event.
* @return the extension attributes as an unmodifiable map.
*/
public Map<String, Object> getExtensionAttributes() {
if (this.cloudEvent.getAdditionalProperties() == null) {
return null;
}
return Collections.unmodifiableMap(this.cloudEvent.getAdditionalProperties());
}
/**
* Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed
* to lowercase and must not share a name with any reserved cloud event properties.
* @param name the name of the attribute.
* @param value the value to associate with the name.
*
* @return the cloud event itself.
*/
public CloudEvent addExtensionAttribute(String name, Object value) {
if (this.cloudEvent.getAdditionalProperties() == null) {
this.cloudEvent.setAdditionalProperties(new HashMap<>());
}
this.cloudEvent.getAdditionalProperties().put(name.toLowerCase(Locale.ENGLISH), value);
return this;
}
CloudEvent(com.azure.messaging.eventgrid.implementation.models.CloudEvent impl) {
this.cloudEvent = impl;
}
com.azure.messaging.eventgrid.implementation.models.CloudEvent toImpl() {
return this.cloudEvent;
}
} |
Seems `listByScope` is easier? | public PagedIterable<ManagementLock> listForResource(String resourceId) {
return wrapList(this.manager().managementLockClient().getManagementLocks().listAtResourceLevel(
ResourceUtils.groupFromResourceId(resourceId),
ResourceUtils.resourceProviderFromResourceId(resourceId),
ResourceUtils.parentRelativePathFromResourceId(resourceId),
ResourceUtils.resourceTypeFromResourceId(resourceId),
ResourceUtils.nameFromResourceId(resourceId)));
} | return wrapList(this.manager().managementLockClient().getManagementLocks().listAtResourceLevel( | public PagedIterable<ManagementLock> listForResource(String resourceId) {
return wrapList(this.manager().managementLockClient().getManagementLocks().listByScope(resourceId));
} | class ManagementLocksImpl
extends CreatableResourcesImpl<ManagementLock, ManagementLockImpl, ManagementLockObjectInner>
implements ManagementLocks {
private final ResourceManager manager;
public ManagementLocksImpl(final ResourceManager manager) {
this.manager = manager;
}
/**
 * Extracts, from a management lock's own resource ID, the ID of the resource
 * the lock is attached to.
 * @param lockId a lock resource ID
 * @return the scope (resource ID) portion, or null if lockId is not a valid lock ID
 */
public static String resourceIdFromLockId(String lockId) {
    String[] parts = lockIdParts(lockId);
    if (parts.length == 0) {
        return null;
    }
    // Everything before the trailing
    // ".../providers/Microsoft.Authorization/locks/<name>" suffix is the scope.
    StringBuilder scope = new StringBuilder();
    int scopeSegments = parts.length - 4;
    for (int i = 0; i < scopeSegments; i++) {
        if (parts[i].isEmpty()) {
            continue;
        }
        scope.append('/').append(parts[i]);
    }
    return scope.toString();
}
// Splits a lock resource ID into its path segments. Returns an empty array
// when the ID is null, has fewer than four segments, or does not end with the
// ".../providers/Microsoft.Authorization/locks/<name>" suffix.
private static String[] lockIdParts(String lockId) {
    if (lockId == null) {
        return new String[0];
    }
    String[] parts = lockId.split("/");
    if (parts.length < 4) {
        return new String[0];
    }
    boolean isLockId = "locks".equalsIgnoreCase(parts[parts.length - 2])
        && "Microsoft.Authorization".equalsIgnoreCase(parts[parts.length - 3])
        && "providers".equalsIgnoreCase(parts[parts.length - 4]);
    return isLockId ? parts : new String[0];
}
@Override
public ManagementLockImpl define(String name) {
return wrapModel(name);
}
@Override
protected ManagementLockImpl wrapModel(String name) {
ManagementLockObjectInner inner = new ManagementLockObjectInner();
return new ManagementLockImpl(name, inner, this.manager());
}
// Wraps an inner management-lock model; null-safe for absent inners.
@Override
protected ManagementLockImpl wrapModel(ManagementLockObjectInner inner) {
    if (inner == null) {
        return null;
    }
    // Pass the lock's name — not its full resource ID — as the wrapper's name,
    // matching the corrected implementation elsewhere in this source.
    return new ManagementLockImpl(inner.name(), inner, this.manager());
}
@Override
public PagedIterable<ManagementLock> list() {
return wrapList(this.manager().managementLockClient().getManagementLocks().list());
}
@Override
public PagedFlux<ManagementLock> listAsync() {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listAsync());
}
// Deletes a lock by its full lock resource ID, resolving the scope it applies
// to and the lock's own name from the ID.
@Override
public Mono<Void> deleteByIdAsync(String id) {
    // Split the lock ID into the scope it applies to and the lock's name.
    String scope = resourceIdFromLockId(id);
    String lockName = ResourceUtils.nameFromResourceId(id);
    if (scope != null && lockName != null) {
        return this.manager().managementLockClient().getManagementLocks().deleteByScopeAsync(scope, lockName);
    } else {
        // Unparsable ID: complete successfully without issuing a service call.
        return Mono.empty();
    }
}
@Override
public PagedIterable<ManagementLock> listByResourceGroup(String resourceGroupName) {
return wrapList(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroup(resourceGroupName));
}
@Override
public PagedFlux<ManagementLock> listByResourceGroupAsync(String resourceGroupName) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroupAsync(resourceGroupName));
}
@Override
public ManagementLock getByResourceGroup(String resourceGroupName, String name) {
return this.getByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<ManagementLock> getByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.getByResourceGroupAsync(resourceGroupName, name)
.map(this::wrapModel);
}
@Override
public ManagementLock getById(String id) {
return this.getByIdAsync(id).block();
}
@Override
public Mono<ManagementLock> getByIdAsync(String id) {
String resourceId = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
return this.manager().managementLockClient().getManagementLocks().getByScopeAsync(resourceId, lockName)
.map(this::wrapModel);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name) {
this.deleteByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.deleteAtResourceGroupLevelAsync(resourceGroupName, name);
}
// Deletes several locks by ID, emitting each ID as its deletion completes.
// flatMapDelayError defers any failure until all deletions have been
// attempted, so one bad ID does not abort the rest.
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
    if (ids == null || ids.isEmpty()) {
        return Flux.empty();
    }
    // 32/32 are the concurrency and prefetch arguments — presumably chosen to
    // bound parallel service calls; confirm against Flux.flatMapDelayError docs.
    return Flux.fromIterable(ids)
        .flatMapDelayError(id -> {
            String lockName = ResourceUtils.nameFromResourceId(id);
            String scopeName = ManagementLocksImpl.resourceIdFromLockId(id);
            return this.manager().managementLockClient().getManagementLocks()
                .deleteByScopeAsync(scopeName, lockName)
                .then(Mono.just(id));
        }, 32, 32);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public ResourceManager manager() {
return this.manager;
}
// Lists all locks on a specific resource, asynchronously. The resource ID is
// decomposed into the level-specific parameters the list API requires.
// (Removed a duplicated @Override annotation — repeating the same annotation
// is a compile error in Java.)
@Override
public PagedFlux<ManagementLock> listForResourceAsync(String resourceId) {
    return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listAtResourceLevelAsync(
        ResourceUtils.groupFromResourceId(resourceId),
        ResourceUtils.resourceProviderFromResourceId(resourceId),
        ResourceUtils.parentRelativePathFromResourceId(resourceId),
        ResourceUtils.resourceTypeFromResourceId(resourceId),
        ResourceUtils.nameFromResourceId(resourceId)));
}
} | class ManagementLocksImpl
extends CreatableResourcesImpl<ManagementLock, ManagementLockImpl, ManagementLockObjectInner>
implements ManagementLocks {
private final ResourceManager manager;
public ManagementLocksImpl(final ResourceManager manager) {
this.manager = manager;
}
/**
* Returns the part of the specified management lock resource ID
* representing the resource the lock is associated with.
* @param lockId a lock resource ID
* @return a resource ID
*/
static String resourceIdFromLockId(String lockId) {
String[] lockIdParts = lockIdParts(lockId);
if (CoreUtils.isNullOrEmpty(lockIdParts)) {
return null;
}
StringBuilder resourceId = new StringBuilder();
for (int i = 0; i < lockIdParts.length - 4; i++) {
if (!lockIdParts[i].isEmpty()) {
resourceId.append("/").append(lockIdParts[i]);
}
}
return resourceId.toString();
}
private static String[] lockIdParts(String lockId) {
if (CoreUtils.isNullOrEmpty(lockId)) {
return new String[0];
}
String[] parts = lockId.split("/");
if (parts.length < 4) {
return new String[0];
}
if (!parts[parts.length - 2].equalsIgnoreCase("locks")
|| !parts[parts.length - 3].equalsIgnoreCase("Microsoft.Authorization")
|| !parts[parts.length - 4].equalsIgnoreCase("providers")) {
return new String[0];
}
return parts;
}
@Override
public ManagementLockImpl define(String name) {
return wrapModel(name);
}
@Override
protected ManagementLockImpl wrapModel(String name) {
ManagementLockObjectInner inner = new ManagementLockObjectInner();
return new ManagementLockImpl(name, inner, this.manager());
}
@Override
protected ManagementLockImpl wrapModel(ManagementLockObjectInner inner) {
if (inner == null) {
return null;
}
return new ManagementLockImpl(inner.name(), inner, this.manager());
}
@Override
public PagedIterable<ManagementLock> list() {
return wrapList(this.manager().managementLockClient().getManagementLocks().list());
}
@Override
public PagedFlux<ManagementLock> listAsync() {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listAsync());
}
@Override
public Mono<Void> deleteByIdAsync(String id) {
String scope = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
if (scope != null && lockName != null) {
return this.manager().managementLockClient().getManagementLocks().deleteByScopeAsync(scope, lockName);
} else {
return Mono.empty();
}
}
@Override
public PagedIterable<ManagementLock> listByResourceGroup(String resourceGroupName) {
return wrapList(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroup(resourceGroupName));
}
@Override
public PagedFlux<ManagementLock> listByResourceGroupAsync(String resourceGroupName) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroupAsync(resourceGroupName));
}
@Override
public ManagementLock getByResourceGroup(String resourceGroupName, String name) {
return this.getByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<ManagementLock> getByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.getByResourceGroupAsync(resourceGroupName, name)
.map(this::wrapModel);
}
@Override
public ManagementLock getById(String id) {
return this.getByIdAsync(id).block();
}
@Override
public Mono<ManagementLock> getByIdAsync(String id) {
String resourceId = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
return this.manager().managementLockClient().getManagementLocks().getByScopeAsync(resourceId, lockName)
.map(this::wrapModel);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name) {
this.deleteByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.deleteAtResourceGroupLevelAsync(resourceGroupName, name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
if (ids == null || ids.isEmpty()) {
return Flux.empty();
}
return Flux.fromIterable(ids)
.flatMapDelayError(id -> {
String lockName = ResourceUtils.nameFromResourceId(id);
String scopeName = ManagementLocksImpl.resourceIdFromLockId(id);
return this.manager().managementLockClient().getManagementLocks()
.deleteByScopeAsync(scopeName, lockName)
.then(Mono.just(id));
}, 32, 32);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public ResourceManager manager() {
return this.manager;
}
// Lists all locks at the given scope, asynchronously. listByScope accepts any
// scope (resource or resource group), so no ID parsing is needed here.
// (Removed a duplicated @Override annotation — repeating the same annotation
// is a compile error in Java.)
@Override
public PagedFlux<ManagementLock> listForResourceAsync(String resourceId) {
    return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listByScopeAsync(resourceId));
}
} |
By testing, `listByScope` should be the right one as parsing id would fail if it is resource group id here. | public PagedIterable<ManagementLock> listForResource(String resourceId) {
return wrapList(this.manager().managementLockClient().getManagementLocks().listAtResourceLevel(
ResourceUtils.groupFromResourceId(resourceId),
ResourceUtils.resourceProviderFromResourceId(resourceId),
ResourceUtils.parentRelativePathFromResourceId(resourceId),
ResourceUtils.resourceTypeFromResourceId(resourceId),
ResourceUtils.nameFromResourceId(resourceId)));
} | return wrapList(this.manager().managementLockClient().getManagementLocks().listAtResourceLevel( | public PagedIterable<ManagementLock> listForResource(String resourceId) {
return wrapList(this.manager().managementLockClient().getManagementLocks().listByScope(resourceId));
} | class ManagementLocksImpl
extends CreatableResourcesImpl<ManagementLock, ManagementLockImpl, ManagementLockObjectInner>
implements ManagementLocks {
private final ResourceManager manager;
public ManagementLocksImpl(final ResourceManager manager) {
this.manager = manager;
}
/**
 * Returns the part of the specified management lock resource ID
 * representing the resource (scope) the lock is associated with.
 * <p>
 * A lock ID has the form
 * {@code {scope}/providers/Microsoft.Authorization/locks/{lockName}}; this method strips
 * the trailing four segments and rebuilds the leading scope portion.
 *
 * @param lockId a lock resource ID
 * @return the scope resource ID, or {@code null} if {@code lockId} is not a well-formed lock ID
 */
public static String resourceIdFromLockId(String lockId) {
String[] lockIdParts = lockIdParts(lockId);
// lockIdParts returns an empty array for null or malformed lock IDs.
if (lockIdParts.length == 0) {
return null;
}
StringBuilder resourceId = new StringBuilder();
// Rebuild everything before ".../providers/Microsoft.Authorization/locks/{name}" (the last 4 parts).
for (int i = 0; i < lockIdParts.length - 4; i++) {
if (!lockIdParts[i].isEmpty()) {
resourceId.append("/").append(lockIdParts[i]);
}
}
return resourceId.toString();
}
// Splits a lock ID into its "/"-separated segments. Yields an empty array unless the ID
// ends with ".../providers/Microsoft.Authorization/locks/{name}" (case-insensitive).
private static String[] lockIdParts(String lockId) {
    String[] none = new String[0];
    if (lockId == null) {
        return none;
    }
    String[] segments = lockId.split("/");
    int n = segments.length;
    boolean wellFormed = n >= 4
        && "locks".equalsIgnoreCase(segments[n - 2])
        && "Microsoft.Authorization".equalsIgnoreCase(segments[n - 3])
        && "providers".equalsIgnoreCase(segments[n - 4]);
    return wellFormed ? segments : none;
}
@Override
public ManagementLockImpl define(String name) {
return wrapModel(name);
}
@Override
protected ManagementLockImpl wrapModel(String name) {
ManagementLockObjectInner inner = new ManagementLockObjectInner();
return new ManagementLockImpl(name, inner, this.manager());
}
@Override
protected ManagementLockImpl wrapModel(ManagementLockObjectInner inner) {
if (inner == null) {
return null;
}
return new ManagementLockImpl(inner.id(), inner, this.manager());
}
@Override
public PagedIterable<ManagementLock> list() {
return wrapList(this.manager().managementLockClient().getManagementLocks().list());
}
@Override
public PagedFlux<ManagementLock> listAsync() {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listAsync());
}
@Override
public Mono<Void> deleteByIdAsync(String id) {
String scope = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
if (scope != null && lockName != null) {
return this.manager().managementLockClient().getManagementLocks().deleteByScopeAsync(scope, lockName);
} else {
return Mono.empty();
}
}
@Override
public PagedIterable<ManagementLock> listByResourceGroup(String resourceGroupName) {
return wrapList(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroup(resourceGroupName));
}
@Override
public PagedFlux<ManagementLock> listByResourceGroupAsync(String resourceGroupName) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroupAsync(resourceGroupName));
}
@Override
public ManagementLock getByResourceGroup(String resourceGroupName, String name) {
return this.getByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<ManagementLock> getByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.getByResourceGroupAsync(resourceGroupName, name)
.map(this::wrapModel);
}
@Override
public ManagementLock getById(String id) {
return this.getByIdAsync(id).block();
}
@Override
public Mono<ManagementLock> getByIdAsync(String id) {
String resourceId = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
return this.manager().managementLockClient().getManagementLocks().getByScopeAsync(resourceId, lockName)
.map(this::wrapModel);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name) {
this.deleteByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.deleteAtResourceGroupLevelAsync(resourceGroupName, name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
// Nothing to delete: complete immediately without emissions.
if (ids == null || ids.isEmpty()) {
return Flux.empty();
}
// Delete each lock by (scope, name), emitting the ID once its delete has finished.
// flatMapDelayError(…, 32, 32): up to 32 concurrent deletes with a prefetch of 32, and
// any failure is delayed until all other deletes have been attempted.
return Flux.fromIterable(ids)
.flatMapDelayError(id -> {
String lockName = ResourceUtils.nameFromResourceId(id);
String scopeName = ManagementLocksImpl.resourceIdFromLockId(id);
return this.manager().managementLockClient().getManagementLocks()
.deleteByScopeAsync(scopeName, lockName)
.then(Mono.just(id));
}, 32, 32);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public ResourceManager manager() {
return this.manager;
}
// NOTE(review): removed a duplicated @Override annotation, which is a compile error in Java.
@Override
public PagedFlux<ManagementLock> listForResourceAsync(String resourceId) {
    // Parses the resource ID into the components listAtResourceLevelAsync requires.
    // NOTE(review): this parsing fails for resource-group IDs; listByScopeAsync(resourceId)
    // may be the safer API here — confirm against service behavior.
    return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listAtResourceLevelAsync(
        ResourceUtils.groupFromResourceId(resourceId),
        ResourceUtils.resourceProviderFromResourceId(resourceId),
        ResourceUtils.parentRelativePathFromResourceId(resourceId),
        ResourceUtils.resourceTypeFromResourceId(resourceId),
        ResourceUtils.nameFromResourceId(resourceId)));
}
} | class ManagementLocksImpl
extends CreatableResourcesImpl<ManagementLock, ManagementLockImpl, ManagementLockObjectInner>
implements ManagementLocks {
private final ResourceManager manager;
public ManagementLocksImpl(final ResourceManager manager) {
this.manager = manager;
}
/**
* Returns the part of the specified management lock resource ID
* representing the resource the lock is associated with.
* @param lockId a lock resource ID
* @return a resource ID
*/
static String resourceIdFromLockId(String lockId) {
String[] lockIdParts = lockIdParts(lockId);
if (CoreUtils.isNullOrEmpty(lockIdParts)) {
return null;
}
StringBuilder resourceId = new StringBuilder();
for (int i = 0; i < lockIdParts.length - 4; i++) {
if (!lockIdParts[i].isEmpty()) {
resourceId.append("/").append(lockIdParts[i]);
}
}
return resourceId.toString();
}
private static String[] lockIdParts(String lockId) {
if (CoreUtils.isNullOrEmpty(lockId)) {
return new String[0];
}
String[] parts = lockId.split("/");
if (parts.length < 4) {
return new String[0];
}
if (!parts[parts.length - 2].equalsIgnoreCase("locks")
|| !parts[parts.length - 3].equalsIgnoreCase("Microsoft.Authorization")
|| !parts[parts.length - 4].equalsIgnoreCase("providers")) {
return new String[0];
}
return parts;
}
@Override
public ManagementLockImpl define(String name) {
return wrapModel(name);
}
@Override
protected ManagementLockImpl wrapModel(String name) {
ManagementLockObjectInner inner = new ManagementLockObjectInner();
return new ManagementLockImpl(name, inner, this.manager());
}
@Override
protected ManagementLockImpl wrapModel(ManagementLockObjectInner inner) {
if (inner == null) {
return null;
}
return new ManagementLockImpl(inner.name(), inner, this.manager());
}
@Override
public PagedIterable<ManagementLock> list() {
return wrapList(this.manager().managementLockClient().getManagementLocks().list());
}
@Override
public PagedFlux<ManagementLock> listAsync() {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listAsync());
}
@Override
public Mono<Void> deleteByIdAsync(String id) {
String scope = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
if (scope != null && lockName != null) {
return this.manager().managementLockClient().getManagementLocks().deleteByScopeAsync(scope, lockName);
} else {
return Mono.empty();
}
}
@Override
public PagedIterable<ManagementLock> listByResourceGroup(String resourceGroupName) {
return wrapList(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroup(resourceGroupName));
}
@Override
public PagedFlux<ManagementLock> listByResourceGroupAsync(String resourceGroupName) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroupAsync(resourceGroupName));
}
@Override
public ManagementLock getByResourceGroup(String resourceGroupName, String name) {
return this.getByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<ManagementLock> getByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.getByResourceGroupAsync(resourceGroupName, name)
.map(this::wrapModel);
}
@Override
public ManagementLock getById(String id) {
return this.getByIdAsync(id).block();
}
@Override
public Mono<ManagementLock> getByIdAsync(String id) {
String resourceId = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
return this.manager().managementLockClient().getManagementLocks().getByScopeAsync(resourceId, lockName)
.map(this::wrapModel);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name) {
this.deleteByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.deleteAtResourceGroupLevelAsync(resourceGroupName, name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
if (ids == null || ids.isEmpty()) {
return Flux.empty();
}
return Flux.fromIterable(ids)
.flatMapDelayError(id -> {
String lockName = ResourceUtils.nameFromResourceId(id);
String scopeName = ManagementLocksImpl.resourceIdFromLockId(id);
return this.manager().managementLockClient().getManagementLocks()
.deleteByScopeAsync(scopeName, lockName)
.then(Mono.just(id));
}, 32, 32);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public ResourceManager manager() {
return this.manager;
}
// NOTE(review): removed a duplicated @Override annotation (compile error in Java).
@Override
public PagedFlux<ManagementLock> listForResourceAsync(String resourceId) {
    // listByScopeAsync takes the raw scope ID directly; no client-side ID parsing required.
    return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listByScopeAsync(resourceId));
}
} |
Not exactly sure whether we should throw an exception here instead of returning an empty array. | private static String[] lockIdParts(String lockId) {
if (CoreUtils.isNullOrEmpty(lockId)) {
return new String[0];
}
String[] parts = lockId.split("/");
if (parts.length < 4) {
return new String[0];
}
if (!parts[parts.length - 2].equalsIgnoreCase("locks")
|| !parts[parts.length - 3].equalsIgnoreCase("Microsoft.Authorization")
|| !parts[parts.length - 4].equalsIgnoreCase("providers")) {
return new String[0];
}
return parts;
} | return new String[0]; | private static String[] lockIdParts(String lockId) {
if (CoreUtils.isNullOrEmpty(lockId)) {
return new String[0];
}
String[] parts = lockId.split("/");
if (parts.length < 4) {
return new String[0];
}
if (!parts[parts.length - 2].equalsIgnoreCase("locks")
|| !parts[parts.length - 3].equalsIgnoreCase("Microsoft.Authorization")
|| !parts[parts.length - 4].equalsIgnoreCase("providers")) {
return new String[0];
}
return parts;
} | class ManagementLocksImpl
extends CreatableResourcesImpl<ManagementLock, ManagementLockImpl, ManagementLockObjectInner>
implements ManagementLocks {
private final ResourceManager manager;
public ManagementLocksImpl(final ResourceManager manager) {
this.manager = manager;
}
/**
* Returns the part of the specified management lock resource ID
* representing the resource the lock is associated with.
* @param lockId a lock resource ID
* @return a resource ID
*/
static String resourceIdFromLockId(String lockId) {
String[] lockIdParts = lockIdParts(lockId);
if (CoreUtils.isNullOrEmpty(lockIdParts)) {
return null;
}
StringBuilder resourceId = new StringBuilder();
for (int i = 0; i < lockIdParts.length - 4; i++) {
if (!lockIdParts[i].isEmpty()) {
resourceId.append("/").append(lockIdParts[i]);
}
}
return resourceId.toString();
}
@Override
public ManagementLockImpl define(String name) {
return wrapModel(name);
}
@Override
protected ManagementLockImpl wrapModel(String name) {
ManagementLockObjectInner inner = new ManagementLockObjectInner();
return new ManagementLockImpl(name, inner, this.manager());
}
@Override
protected ManagementLockImpl wrapModel(ManagementLockObjectInner inner) {
if (inner == null) {
return null;
}
return new ManagementLockImpl(inner.name(), inner, this.manager());
}
@Override
public PagedIterable<ManagementLock> list() {
return wrapList(this.manager().managementLockClient().getManagementLocks().list());
}
@Override
public PagedFlux<ManagementLock> listAsync() {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listAsync());
}
@Override
public Mono<Void> deleteByIdAsync(String id) {
String scope = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
if (scope != null && lockName != null) {
return this.manager().managementLockClient().getManagementLocks().deleteByScopeAsync(scope, lockName);
} else {
return Mono.empty();
}
}
@Override
public PagedIterable<ManagementLock> listByResourceGroup(String resourceGroupName) {
return wrapList(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroup(resourceGroupName));
}
@Override
public PagedFlux<ManagementLock> listByResourceGroupAsync(String resourceGroupName) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroupAsync(resourceGroupName));
}
@Override
public ManagementLock getByResourceGroup(String resourceGroupName, String name) {
return this.getByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<ManagementLock> getByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.getByResourceGroupAsync(resourceGroupName, name)
.map(this::wrapModel);
}
@Override
public ManagementLock getById(String id) {
return this.getByIdAsync(id).block();
}
@Override
public Mono<ManagementLock> getByIdAsync(String id) {
String resourceId = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
return this.manager().managementLockClient().getManagementLocks().getByScopeAsync(resourceId, lockName)
.map(this::wrapModel);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name) {
this.deleteByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.deleteAtResourceGroupLevelAsync(resourceGroupName, name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
if (ids == null || ids.isEmpty()) {
return Flux.empty();
}
return Flux.fromIterable(ids)
.flatMapDelayError(id -> {
String lockName = ResourceUtils.nameFromResourceId(id);
String scopeName = ManagementLocksImpl.resourceIdFromLockId(id);
return this.manager().managementLockClient().getManagementLocks()
.deleteByScopeAsync(scopeName, lockName)
.then(Mono.just(id));
}, 32, 32);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public ResourceManager manager() {
return this.manager;
}
@Override
public PagedIterable<ManagementLock> listForResource(String resourceId) {
return wrapList(this.manager().managementLockClient().getManagementLocks().listByScope(resourceId));
}
@Override
public PagedFlux<ManagementLock> listForResourceAsync(String resourceId) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listByScopeAsync(resourceId));
}
} | class ManagementLocksImpl
extends CreatableResourcesImpl<ManagementLock, ManagementLockImpl, ManagementLockObjectInner>
implements ManagementLocks {
private final ResourceManager manager;
public ManagementLocksImpl(final ResourceManager manager) {
this.manager = manager;
}
/**
* Returns the part of the specified management lock resource ID
* representing the resource the lock is associated with.
* @param lockId a lock resource ID
* @return a resource ID
*/
static String resourceIdFromLockId(String lockId) {
String[] lockIdParts = lockIdParts(lockId);
if (CoreUtils.isNullOrEmpty(lockIdParts)) {
return null;
}
StringBuilder resourceId = new StringBuilder();
for (int i = 0; i < lockIdParts.length - 4; i++) {
if (!lockIdParts[i].isEmpty()) {
resourceId.append("/").append(lockIdParts[i]);
}
}
return resourceId.toString();
}
@Override
public ManagementLockImpl define(String name) {
return wrapModel(name);
}
@Override
protected ManagementLockImpl wrapModel(String name) {
ManagementLockObjectInner inner = new ManagementLockObjectInner();
return new ManagementLockImpl(name, inner, this.manager());
}
@Override
protected ManagementLockImpl wrapModel(ManagementLockObjectInner inner) {
if (inner == null) {
return null;
}
return new ManagementLockImpl(inner.name(), inner, this.manager());
}
@Override
public PagedIterable<ManagementLock> list() {
return wrapList(this.manager().managementLockClient().getManagementLocks().list());
}
@Override
public PagedFlux<ManagementLock> listAsync() {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listAsync());
}
@Override
public Mono<Void> deleteByIdAsync(String id) {
String scope = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
if (scope != null && lockName != null) {
return this.manager().managementLockClient().getManagementLocks().deleteByScopeAsync(scope, lockName);
} else {
return Mono.empty();
}
}
@Override
public PagedIterable<ManagementLock> listByResourceGroup(String resourceGroupName) {
return wrapList(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroup(resourceGroupName));
}
@Override
public PagedFlux<ManagementLock> listByResourceGroupAsync(String resourceGroupName) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroupAsync(resourceGroupName));
}
@Override
public ManagementLock getByResourceGroup(String resourceGroupName, String name) {
return this.getByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<ManagementLock> getByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.getByResourceGroupAsync(resourceGroupName, name)
.map(this::wrapModel);
}
@Override
public ManagementLock getById(String id) {
return this.getByIdAsync(id).block();
}
@Override
public Mono<ManagementLock> getByIdAsync(String id) {
String resourceId = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
return this.manager().managementLockClient().getManagementLocks().getByScopeAsync(resourceId, lockName)
.map(this::wrapModel);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name) {
this.deleteByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.deleteAtResourceGroupLevelAsync(resourceGroupName, name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
if (ids == null || ids.isEmpty()) {
return Flux.empty();
}
return Flux.fromIterable(ids)
.flatMapDelayError(id -> {
String lockName = ResourceUtils.nameFromResourceId(id);
String scopeName = ManagementLocksImpl.resourceIdFromLockId(id);
return this.manager().managementLockClient().getManagementLocks()
.deleteByScopeAsync(scopeName, lockName)
.then(Mono.just(id));
}, 32, 32);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public ResourceManager manager() {
return this.manager;
}
@Override
public PagedIterable<ManagementLock> listForResource(String resourceId) {
return wrapList(this.manager().managementLockClient().getManagementLocks().listByScope(resourceId));
}
@Override
public PagedFlux<ManagementLock> listForResourceAsync(String resourceId) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listByScopeAsync(resourceId));
}
} |
This can be used after a Get/List operation for lockedResourceId, so it might be better not to throw here. | private static String[] lockIdParts(String lockId) {
if (CoreUtils.isNullOrEmpty(lockId)) {
return new String[0];
}
String[] parts = lockId.split("/");
if (parts.length < 4) {
return new String[0];
}
if (!parts[parts.length - 2].equalsIgnoreCase("locks")
|| !parts[parts.length - 3].equalsIgnoreCase("Microsoft.Authorization")
|| !parts[parts.length - 4].equalsIgnoreCase("providers")) {
return new String[0];
}
return parts;
} | return new String[0]; | private static String[] lockIdParts(String lockId) {
if (CoreUtils.isNullOrEmpty(lockId)) {
return new String[0];
}
String[] parts = lockId.split("/");
if (parts.length < 4) {
return new String[0];
}
if (!parts[parts.length - 2].equalsIgnoreCase("locks")
|| !parts[parts.length - 3].equalsIgnoreCase("Microsoft.Authorization")
|| !parts[parts.length - 4].equalsIgnoreCase("providers")) {
return new String[0];
}
return parts;
} | class ManagementLocksImpl
extends CreatableResourcesImpl<ManagementLock, ManagementLockImpl, ManagementLockObjectInner>
implements ManagementLocks {
private final ResourceManager manager;
public ManagementLocksImpl(final ResourceManager manager) {
this.manager = manager;
}
/**
* Returns the part of the specified management lock resource ID
* representing the resource the lock is associated with.
* @param lockId a lock resource ID
* @return a resource ID
*/
static String resourceIdFromLockId(String lockId) {
String[] lockIdParts = lockIdParts(lockId);
if (CoreUtils.isNullOrEmpty(lockIdParts)) {
return null;
}
StringBuilder resourceId = new StringBuilder();
for (int i = 0; i < lockIdParts.length - 4; i++) {
if (!lockIdParts[i].isEmpty()) {
resourceId.append("/").append(lockIdParts[i]);
}
}
return resourceId.toString();
}
@Override
public ManagementLockImpl define(String name) {
return wrapModel(name);
}
@Override
protected ManagementLockImpl wrapModel(String name) {
ManagementLockObjectInner inner = new ManagementLockObjectInner();
return new ManagementLockImpl(name, inner, this.manager());
}
@Override
protected ManagementLockImpl wrapModel(ManagementLockObjectInner inner) {
if (inner == null) {
return null;
}
return new ManagementLockImpl(inner.name(), inner, this.manager());
}
@Override
public PagedIterable<ManagementLock> list() {
return wrapList(this.manager().managementLockClient().getManagementLocks().list());
}
@Override
public PagedFlux<ManagementLock> listAsync() {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listAsync());
}
@Override
public Mono<Void> deleteByIdAsync(String id) {
String scope = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
if (scope != null && lockName != null) {
return this.manager().managementLockClient().getManagementLocks().deleteByScopeAsync(scope, lockName);
} else {
return Mono.empty();
}
}
@Override
public PagedIterable<ManagementLock> listByResourceGroup(String resourceGroupName) {
return wrapList(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroup(resourceGroupName));
}
@Override
public PagedFlux<ManagementLock> listByResourceGroupAsync(String resourceGroupName) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroupAsync(resourceGroupName));
}
@Override
public ManagementLock getByResourceGroup(String resourceGroupName, String name) {
return this.getByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<ManagementLock> getByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.getByResourceGroupAsync(resourceGroupName, name)
.map(this::wrapModel);
}
@Override
public ManagementLock getById(String id) {
return this.getByIdAsync(id).block();
}
@Override
public Mono<ManagementLock> getByIdAsync(String id) {
String resourceId = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
return this.manager().managementLockClient().getManagementLocks().getByScopeAsync(resourceId, lockName)
.map(this::wrapModel);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name) {
this.deleteByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.deleteAtResourceGroupLevelAsync(resourceGroupName, name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
if (ids == null || ids.isEmpty()) {
return Flux.empty();
}
return Flux.fromIterable(ids)
.flatMapDelayError(id -> {
String lockName = ResourceUtils.nameFromResourceId(id);
String scopeName = ManagementLocksImpl.resourceIdFromLockId(id);
return this.manager().managementLockClient().getManagementLocks()
.deleteByScopeAsync(scopeName, lockName)
.then(Mono.just(id));
}, 32, 32);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public ResourceManager manager() {
return this.manager;
}
@Override
public PagedIterable<ManagementLock> listForResource(String resourceId) {
return wrapList(this.manager().managementLockClient().getManagementLocks().listByScope(resourceId));
}
@Override
public PagedFlux<ManagementLock> listForResourceAsync(String resourceId) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listByScopeAsync(resourceId));
}
} | class ManagementLocksImpl
extends CreatableResourcesImpl<ManagementLock, ManagementLockImpl, ManagementLockObjectInner>
implements ManagementLocks {
private final ResourceManager manager;
public ManagementLocksImpl(final ResourceManager manager) {
this.manager = manager;
}
/**
* Returns the part of the specified management lock resource ID
* representing the resource the lock is associated with.
* @param lockId a lock resource ID
* @return a resource ID
*/
static String resourceIdFromLockId(String lockId) {
String[] lockIdParts = lockIdParts(lockId);
if (CoreUtils.isNullOrEmpty(lockIdParts)) {
return null;
}
StringBuilder resourceId = new StringBuilder();
for (int i = 0; i < lockIdParts.length - 4; i++) {
if (!lockIdParts[i].isEmpty()) {
resourceId.append("/").append(lockIdParts[i]);
}
}
return resourceId.toString();
}
@Override
public ManagementLockImpl define(String name) {
return wrapModel(name);
}
@Override
protected ManagementLockImpl wrapModel(String name) {
ManagementLockObjectInner inner = new ManagementLockObjectInner();
return new ManagementLockImpl(name, inner, this.manager());
}
@Override
protected ManagementLockImpl wrapModel(ManagementLockObjectInner inner) {
if (inner == null) {
return null;
}
return new ManagementLockImpl(inner.name(), inner, this.manager());
}
@Override
public PagedIterable<ManagementLock> list() {
return wrapList(this.manager().managementLockClient().getManagementLocks().list());
}
@Override
public PagedFlux<ManagementLock> listAsync() {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listAsync());
}
@Override
public Mono<Void> deleteByIdAsync(String id) {
String scope = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
if (scope != null && lockName != null) {
return this.manager().managementLockClient().getManagementLocks().deleteByScopeAsync(scope, lockName);
} else {
return Mono.empty();
}
}
@Override
public PagedIterable<ManagementLock> listByResourceGroup(String resourceGroupName) {
return wrapList(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroup(resourceGroupName));
}
@Override
public PagedFlux<ManagementLock> listByResourceGroupAsync(String resourceGroupName) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks()
.listByResourceGroupAsync(resourceGroupName));
}
@Override
public ManagementLock getByResourceGroup(String resourceGroupName, String name) {
return this.getByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<ManagementLock> getByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.getByResourceGroupAsync(resourceGroupName, name)
.map(this::wrapModel);
}
@Override
public ManagementLock getById(String id) {
return this.getByIdAsync(id).block();
}
@Override
public Mono<ManagementLock> getByIdAsync(String id) {
String resourceId = resourceIdFromLockId(id);
String lockName = ResourceUtils.nameFromResourceId(id);
return this.manager().managementLockClient().getManagementLocks().getByScopeAsync(resourceId, lockName)
.map(this::wrapModel);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name) {
this.deleteByResourceGroupAsync(resourceGroupName, name).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name) {
return this.manager().managementLockClient().getManagementLocks()
.deleteAtResourceGroupLevelAsync(resourceGroupName, name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
if (ids == null || ids.isEmpty()) {
return Flux.empty();
}
return Flux.fromIterable(ids)
.flatMapDelayError(id -> {
String lockName = ResourceUtils.nameFromResourceId(id);
String scopeName = ManagementLocksImpl.resourceIdFromLockId(id);
return this.manager().managementLockClient().getManagementLocks()
.deleteByScopeAsync(scopeName, lockName)
.then(Mono.just(id));
}, 32, 32);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIdsAsync(ids).blockLast();
}
@Override
public ResourceManager manager() {
return this.manager;
}
@Override
public PagedIterable<ManagementLock> listForResource(String resourceId) {
return wrapList(this.manager().managementLockClient().getManagementLocks().listByScope(resourceId));
}
@Override
public PagedFlux<ManagementLock> listForResourceAsync(String resourceId) {
return wrapPageAsync(this.manager().managementLockClient().getManagementLocks().listByScopeAsync(resourceId));
}
} |
Please use `TestSuiteBase#safeClose`, which will do the null check. | public void afterClass() {
if (this.globalClient != null) {
this.globalClient.close();
}
for (CosmosAsyncClient asyncClient : this.regionalClients)
if (asyncClient != null) {
asyncClient.close();
}
} | this.globalClient.close(); | public void afterClass() {
safeClose(this.globalClient);
for (CosmosAsyncClient asyncClient : this.regionalClients)
safeClose(asyncClient);
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
// Test fixture setup: builds one client against the global endpoint plus one client per
// writable region, and loads the stored-procedure source used by the conflict tests.
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
public void before_ConflictTests() throws Exception {
// Stored-procedure body loaded from the "conflict-resolver-sproc" classpath resource.
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
// Client pinned to the configured global endpoint; endpoint discovery is disabled so it
// does not fail over to another region on its own.
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient()
// Discover the writable regions from the account metadata of the global client.
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
// One dedicated client per writable region, pinned to that region's endpoint.
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(accountLocation.getEndpoint())
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictDefaultLWW() throws InterruptedException {
String conflictId = "conflict";
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
if (this.regionalClients.size() > 1) {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomLWW() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
, "/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
"/regionId");
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
"/mypk");
CosmosAsyncStoredProcedure storedProcedure =
database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
sprocBody);
asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictNonExistingCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictNonExistingCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties(
"conflictNonExistingSprocContainer",
"/mypk");
CosmosAsyncStoredProcedure storedProcedure =
database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
List<ConflictTestPojo> testPojos = new ArrayList<>();
List<String> conflictIds = new ArrayList<>();
Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
for (int j = 0; j < 5; j++) {
conflictId = conflictId + j;
boolean conflictCreated = false;
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(5000);
for (int i = 1; i < 4; i++) {
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, conflictIds);
if (testPojos.size() == 0) {
logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
"5 sec",
containers.get(0).getId());
Thread.sleep(5000);
} else {
conflictCreated = true;
break;
}
}
if (conflictCreated) {
break;
}
logger.error("Conflict on {} not created, retrying again",
containers.get(0).getId());
}
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
String query = String.format("SELECT * from c where c.id in (%s)",
Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(conflictId));
iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
for (String id : conflictIds) {
CosmosConflictRequestOptions requestOptions =
new CosmosConflictRequestOptions(new PartitionKey(conflictId));
containers.get(0).getConflict(id).delete(requestOptions).block();
}
Thread.sleep(5000);
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
testPojos.clear();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
for (CosmosAsyncClient asyncClient : this.regionalClients) {
CosmosAsyncContainer container =
asyncClient.getDatabase(dbId).getContainer(containerId);
ConflictTestPojo warmUpItem = getTest();
for (int i = 1; i <= 4; i++) {
try {
container.createItem(warmUpItem).block();
asyncContainers.add(container);
break;
} catch (CosmosException ex) {
logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
Thread.sleep(5000);
}
}
container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
ConflictTestPojo.class).block();
}
}
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private static final String SKIP_SINGLE_REGION_MM_ACCOUNT = "Multi master account doesn't have multiple write " +
"regions to test this";
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
List<String> prefferedLocations = new ArrayList<>();
prefferedLocations.add(accountLocation.getName());
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.preferredRegions(prefferedLocations)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictDefaultLWW() throws InterruptedException {
String conflictId = "conflict";
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
if (this.regionalClients.size() > 1) {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomLWW() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
, "/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
"/regionId");
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
"/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
sprocBody);
asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictNonExistingCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictNonExistingCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties(
"conflictNonExistingSprocContainer",
"/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
List<ConflictTestPojo> testPojos = new ArrayList<>();
List<String> conflictIds = new ArrayList<>();
Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
for (int j = 0; j < 5; j++) {
conflictId = conflictId + j;
boolean conflictCreated = false;
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(5000);
for (int i = 1; i < 4; i++) {
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, conflictIds);
if (testPojos.size() == 0) {
logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
"5 sec",
containers.get(0).getId());
Thread.sleep(5000);
} else {
conflictCreated = true;
break;
}
}
if (conflictCreated) {
break;
}
logger.error("Conflict on {} not created, retrying again",
containers.get(0).getId());
}
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
String query = String.format("SELECT * from c where c.id in (%s)",
Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(conflictId));
iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
for (String id : conflictIds) {
CosmosConflictRequestOptions requestOptions =
new CosmosConflictRequestOptions(new PartitionKey(conflictId));
containers.get(0).getConflict(id).delete(requestOptions).block();
}
Thread.sleep(5000);
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
testPojos.clear();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
for (CosmosAsyncClient asyncClient : this.regionalClients) {
CosmosAsyncContainer container =
asyncClient.getDatabase(dbId).getContainer(containerId);
ConflictTestPojo warmUpItem = getTest();
for (int i = 1; i <= 4; i++) {
try {
container.createItem(warmUpItem).block();
asyncContainers.add(container);
break;
} catch (CosmosException ex) {
logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
Thread.sleep(5000);
}
}
container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
ConflictTestPojo.class).block();
}
}
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} |
ditto. | public void afterClass() {
if (this.globalClient != null) {
this.globalClient.close();
}
for (CosmosAsyncClient asyncClient : this.regionalClients)
if (asyncClient != null) {
asyncClient.close();
}
} | asyncClient.close(); | public void afterClass() {
safeClose(this.globalClient);
for (CosmosAsyncClient asyncClient : this.regionalClients)
safeClose(asyncClient);
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(accountLocation.getEndpoint())
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictDefaultLWW() throws InterruptedException {
String conflictId = "conflict";
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
if (this.regionalClients.size() > 1) {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomLWW() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
, "/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
"/regionId");
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
"/mypk");
CosmosAsyncStoredProcedure storedProcedure =
database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
sprocBody);
asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictNonExistingCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictNonExistingCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties(
"conflictNonExistingSprocContainer",
"/mypk");
CosmosAsyncStoredProcedure storedProcedure =
database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
List<ConflictTestPojo> testPojos = new ArrayList<>();
List<String> conflictIds = new ArrayList<>();
Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
for (int j = 0; j < 5; j++) {
conflictId = conflictId + j;
boolean conflictCreated = false;
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(5000);
for (int i = 1; i < 4; i++) {
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, conflictIds);
if (testPojos.size() == 0) {
logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
"5 sec",
containers.get(0).getId());
Thread.sleep(5000);
} else {
conflictCreated = true;
break;
}
}
if (conflictCreated) {
break;
}
logger.error("Conflict on {} not created, retrying again",
containers.get(0).getId());
}
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
String query = String.format("SELECT * from c where c.id in (%s)",
Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(conflictId));
iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
for (String id : conflictIds) {
CosmosConflictRequestOptions requestOptions =
new CosmosConflictRequestOptions(new PartitionKey(conflictId));
containers.get(0).getConflict(id).delete(requestOptions).block();
}
Thread.sleep(5000);
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
testPojos.clear();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
for (CosmosAsyncClient asyncClient : this.regionalClients) {
CosmosAsyncContainer container =
asyncClient.getDatabase(dbId).getContainer(containerId);
ConflictTestPojo warmUpItem = getTest();
for (int i = 1; i <= 4; i++) {
try {
container.createItem(warmUpItem).block();
asyncContainers.add(container);
break;
} catch (CosmosException ex) {
logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
Thread.sleep(5000);
}
}
container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
ConflictTestPojo.class).block();
}
}
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private static final String SKIP_SINGLE_REGION_MM_ACCOUNT = "Multi master account doesn't have multiple write " +
"regions to test this";
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
List<String> prefferedLocations = new ArrayList<>();
prefferedLocations.add(accountLocation.getName());
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.preferredRegions(prefferedLocations)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictDefaultLWW() throws InterruptedException {
String conflictId = "conflict";
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
if (this.regionalClients.size() > 1) {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomLWW() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
, "/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
"/regionId");
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
"/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
sprocBody);
asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictNonExistingCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictNonExistingCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties(
"conflictNonExistingSprocContainer",
"/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
List<ConflictTestPojo> testPojos = new ArrayList<>();
List<String> conflictIds = new ArrayList<>();
Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
for (int j = 0; j < 5; j++) {
conflictId = conflictId + j;
boolean conflictCreated = false;
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(5000);
for (int i = 1; i < 4; i++) {
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, conflictIds);
if (testPojos.size() == 0) {
logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
"5 sec",
containers.get(0).getId());
Thread.sleep(5000);
} else {
conflictCreated = true;
break;
}
}
if (conflictCreated) {
break;
}
logger.error("Conflict on {} not created, retrying again",
containers.get(0).getId());
}
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
String query = String.format("SELECT * from c where c.id in (%s)",
Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(conflictId));
iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
for (String id : conflictIds) {
CosmosConflictRequestOptions requestOptions =
new CosmosConflictRequestOptions(new PartitionKey(conflictId));
containers.get(0).getConflict(id).delete(requestOptions).block();
}
Thread.sleep(5000);
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
testPojos.clear();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
for (CosmosAsyncClient asyncClient : this.regionalClients) {
CosmosAsyncContainer container =
asyncClient.getDatabase(dbId).getContainer(containerId);
ConflictTestPojo warmUpItem = getTest();
for (int i = 1; i <= 4; i++) {
try {
container.createItem(warmUpItem).block();
asyncContainers.add(container);
break;
} catch (CosmosException ex) {
logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
Thread.sleep(5000);
}
}
container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
ConflictTestPojo.class).block();
}
}
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} |
done | public void afterClass() {
if (this.globalClient != null) {
this.globalClient.close();
}
for (CosmosAsyncClient asyncClient : this.regionalClients)
if (asyncClient != null) {
asyncClient.close();
}
} | this.globalClient.close(); | public void afterClass() {
safeClose(this.globalClient);
for (CosmosAsyncClient asyncClient : this.regionalClients)
safeClose(asyncClient);
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(accountLocation.getEndpoint())
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictDefaultLWW() throws InterruptedException {
String conflictId = "conflict";
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
if (this.regionalClients.size() > 1) {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomLWW() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
, "/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
"/regionId");
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
    /**
     * Verifies custom (stored-procedure based) conflict resolution. The resolver sproc is
     * registered AFTER the container is created; the policy here is built from a
     * CosmosAsyncStoredProcedure reference obtained before the container exists —
     * NOTE(review): other copies of this test use the (dbId, containerId, sprocId)
     * createCustomPolicy overload instead; confirm this variant is intended.
     */
    @Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
    public void conflictCustomSproc() throws InterruptedException {
        if (this.regionalClients.size() > 1) {
            CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
            String sprocId = "conflictCustomSproc";
            CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
                "/mypk");
            CosmosAsyncStoredProcedure storedProcedure =
                database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
            ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
            containerProperties.setConflictResolutionPolicy(resolutionPolicy);
            database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
            // Allow the container-create to propagate to all regions.
            Thread.sleep(5000);
            try {
                CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
                // Register the resolver sproc body loaded in before_ConflictTests.
                CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
                    sprocBody);
                asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
                List<CosmosAsyncContainer> containers = new ArrayList<>();
                warmingUpClient(containers, database.getId(), containerProperties.getId());
                String conflictId = "conflict";
                createItemsInParallelForConflicts(containers, conflictId);
                Thread.sleep(10000);
                // Conflict feed must be empty: the sproc resolves conflicts automatically.
                Iterator<FeedResponse<CosmosConflictProperties>> iterator =
                    containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
                List<ConflictTestPojo> testPojos = new ArrayList<>();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(0);
                CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
                    new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
                // The resolver is expected to pick the writer with regionId 0 — TODO confirm sproc semantics.
                assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
                replaceDeleteItemInParallelForConflicts(containers, itemResponse);
                Thread.sleep(10000);
                try {
                    containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
                        ConflictTestPojo.class).block();
                    fail("Delete should always win in conflict scenerio");
                } catch (CosmosException ex) {
                    assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
                }
            } finally {
                database.getContainer(containerProperties.getId()).delete().block();
            }
        } else {
            fail("Unable to find multi master account");
        }
    }
    /**
     * Verifies conflict-feed behavior when the custom resolver sproc does NOT exist:
     * conflicts cannot be auto-resolved, so they must surface on the conflict feed,
     * be queryable (with and without a partition-key option), and be deletable.
     */
    @Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
    public void conflictNonExistingCustomSproc() throws InterruptedException {
        if (this.regionalClients.size() > 1) {
            CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
            String sprocId = "conflictNonExistingCustomSproc";
            CosmosContainerProperties containerProperties = new CosmosContainerProperties(
                "conflictNonExistingSprocContainer",
                "/mypk");
            // Policy references a sproc that is never registered, so resolution will fail
            // and conflicts land on the feed.
            CosmosAsyncStoredProcedure storedProcedure =
                database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
            ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
            containerProperties.setConflictResolutionPolicy(resolutionPolicy);
            database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
            Thread.sleep(5000);
            try {
                List<CosmosAsyncContainer> containers = new ArrayList<>();
                warmingUpClient(containers, database.getId(), containerProperties.getId());
                String conflictId = "conflict";
                List<ConflictTestPojo> testPojos = new ArrayList<>();
                List<String> conflictIds = new ArrayList<>();
                Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
                // Up to 5 rounds of manufacturing a conflict; each round polls the feed
                // up to 3 times, 5s apart, waiting for the conflict to materialize.
                for (int j = 0; j < 5; j++) {
                    // NOTE(review): this concatenates, yielding "conflict0", "conflict01", ... —
                    // confirm the cumulative suffix is intentional.
                    conflictId = conflictId + j;
                    boolean conflictCreated = false;
                    createItemsInParallelForConflicts(containers, conflictId);
                    Thread.sleep(5000);
                    for (int i = 1; i < 4; i++) {
                        iterator =
                            containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
                        readConflicts(iterator, testPojos, conflictIds);
                        if (testPojos.size() == 0) {
                            logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
                                    "5 sec",
                                containers.get(0).getId());
                            Thread.sleep(5000);
                        } else {
                            conflictCreated = true;
                            break;
                        }
                    }
                    if (conflictCreated) {
                        break;
                    }
                    logger.error("Conflict on {} not created, retrying again",
                        containers.get(0).getId());
                }
                // One writer wins the insert; the other N-1 writes become conflicts.
                assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
                testPojos.clear();
                String query = String.format("SELECT * from c where c.id in (%s)",
                    Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
                CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
                options.setPartitionKey(new PartitionKey(conflictId));
                // Conflicts must be queryable with an explicit partition key...
                iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
                testPojos.clear();
                // ...and via the cross-partition overload.
                iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
                // Deleting every conflict entry should drain the feed.
                for (String id : conflictIds) {
                    CosmosConflictRequestOptions requestOptions =
                        new CosmosConflictRequestOptions(new PartitionKey(conflictId));
                    containers.get(0).getConflict(id).delete(requestOptions).block();
                }
                Thread.sleep(5000);
                iterator =
                    containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
                testPojos.clear();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(0);
                // The winning document itself must still be readable.
                CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
                    new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
                assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
            } finally {
                database.getContainer(containerProperties.getId()).delete().block();
            }
        } else {
            fail("Unable to find multi master account");
        }
    }
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
    /**
     * For each regional client, creates a throw-away document in the target container
     * (retrying up to 4 times, 5s apart, while container metadata propagates across
     * regions) and then reads it back to warm connections and caches.
     * NOTE(review): if all 4 create attempts fail, the container is silently never added
     * to asyncContainers yet the trailing readItem still executes — confirm intended.
     */
    private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
        for (CosmosAsyncClient asyncClient : this.regionalClients) {
            CosmosAsyncContainer container =
                asyncClient.getDatabase(dbId).getContainer(containerId);
            ConflictTestPojo warmUpItem = getTest();
            for (int i = 1; i <= 4; i++) {
                try {
                    container.createItem(warmUpItem).block();
                    // Only containers proven writable are handed to the conflict scenarios.
                    asyncContainers.add(container);
                    break;
                } catch (CosmosException ex) {
                    // Container create may not have replicated to this region yet.
                    logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
                    Thread.sleep(5000);
                }
            }
            container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
                ConflictTestPojo.class).block();
        }
    }
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private static final String SKIP_SINGLE_REGION_MM_ACCOUNT = "Multi master account doesn't have multiple write " +
"regions to test this";
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
List<String> prefferedLocations = new ArrayList<>();
prefferedLocations.add(accountLocation.getName());
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.preferredRegions(prefferedLocations)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
}
    /**
     * Verifies the account's DEFAULT last-writer-wins policy on the shared container:
     * concurrent inserts are auto-resolved (empty conflict feed) and a concurrent
     * replace-vs-delete resolves to the delete.
     */
    @Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
    public void conflictDefaultLWW() throws InterruptedException {
        String conflictId = "conflict";
        CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
        if (this.regionalClients.size() > 1) {
            List<CosmosAsyncContainer> containers = new ArrayList<>();
            warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
            createItemsInParallelForConflicts(containers, conflictId);
            Thread.sleep(10000);
            // Default LWW resolves automatically, so the conflict feed must be empty.
            Iterator<FeedResponse<CosmosConflictProperties>> iterator =
                containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
            List<ConflictTestPojo> testPojos = new ArrayList<>();
            readConflicts(iterator, testPojos, null);
            assertThat(testPojos.size()).isEqualTo(0);
            CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
                new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
            replaceDeleteItemInParallelForConflicts(containers, itemResponse);
            Thread.sleep(10000);
            try {
                containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
                fail("Delete should always win in conflict scenerio");
            } catch (CosmosException ex) {
                // Delete beats replace, so the subsequent read must 404.
                assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
            }
        } else {
            throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
        }
    }
    /**
     * Verifies custom last-writer-wins resolution keyed on "/regionId": the writer with
     * the highest regionId wins concurrent inserts, and delete beats replace.
     */
    @Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
    public void conflictCustomLWW() throws InterruptedException {
        if (this.regionalClients.size() > 1) {
            CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
            CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
                , "/mypk");
            // Conflicts resolve to the larger value at /regionId.
            ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
                "/regionId");
            containerProperties.setConflictResolutionPolicy(resolutionPolicy);
            database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
            // Allow the container-create to propagate to all regions.
            Thread.sleep(5000);
            try {
                List<CosmosAsyncContainer> containers = new ArrayList<>();
                warmingUpClient(containers, database.getId(), containerProperties.getId());
                String conflictId = "conflict";
                createItemsInParallelForConflicts(containers, conflictId);
                Thread.sleep(10000);
                // LWW auto-resolves, so the conflict feed must be empty.
                Iterator<FeedResponse<CosmosConflictProperties>> iterator =
                    containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
                List<ConflictTestPojo> testPojos = new ArrayList<>();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(0);
                CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
                    new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
                // Highest regionId must have won the insert race.
                assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
                replaceDeleteItemInParallelForConflicts(containers, itemResponse);
                Thread.sleep(10000);
                try {
                    containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
                        ConflictTestPojo.class).block();
                    fail("Delete should always win in conflict scenerio");
                } catch (CosmosException ex) {
                    assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
                }
            } finally {
                database.getContainer(containerProperties.getId()).delete().block();
            }
        } else {
            throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
        }
    }
    /**
     * Verifies custom stored-procedure conflict resolution using the
     * (databaseId, containerId, sprocId) policy overload; the resolver sproc itself is
     * registered right after the container is created.
     */
    @Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
    public void conflictCustomSproc() throws InterruptedException {
        if (this.regionalClients.size() > 1) {
            CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
            String sprocId = "conflictCustomSproc";
            CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
                "/mypk");
            ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
            containerProperties.setConflictResolutionPolicy(resolutionPolicy);
            database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
            // Allow the container-create to propagate to all regions.
            Thread.sleep(5000);
            try {
                CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
                // Register the resolver sproc body loaded in before_ConflictTests.
                CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
                    sprocBody);
                asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
                List<CosmosAsyncContainer> containers = new ArrayList<>();
                warmingUpClient(containers, database.getId(), containerProperties.getId());
                String conflictId = "conflict";
                createItemsInParallelForConflicts(containers, conflictId);
                Thread.sleep(10000);
                // The sproc resolves conflicts automatically, so the feed must be empty.
                Iterator<FeedResponse<CosmosConflictProperties>> iterator =
                    containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
                List<ConflictTestPojo> testPojos = new ArrayList<>();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(0);
                CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
                    new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
                // The resolver is expected to pick regionId 0 — TODO confirm sproc semantics.
                assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
                replaceDeleteItemInParallelForConflicts(containers, itemResponse);
                Thread.sleep(10000);
                try {
                    containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
                        ConflictTestPojo.class).block();
                    fail("Delete should always win in conflict scenerio");
                } catch (CosmosException ex) {
                    assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
                }
            } finally {
                database.getContainer(containerProperties.getId()).delete().block();
            }
        } else {
            throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
        }
    }
    /**
     * Verifies conflict-feed behavior when the configured resolver sproc is never
     * registered: conflicts cannot be auto-resolved, so they must surface on the feed,
     * be queryable (with and without a partition-key option), and be deletable.
     */
    @Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
    public void conflictNonExistingCustomSproc() throws InterruptedException {
        if (this.regionalClients.size() > 1) {
            CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
            String sprocId = "conflictNonExistingCustomSproc";
            CosmosContainerProperties containerProperties = new CosmosContainerProperties(
                "conflictNonExistingSprocContainer",
                "/mypk");
            // Policy points at a sproc that is deliberately never created.
            ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
            containerProperties.setConflictResolutionPolicy(resolutionPolicy);
            database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
            Thread.sleep(5000);
            try {
                List<CosmosAsyncContainer> containers = new ArrayList<>();
                warmingUpClient(containers, database.getId(), containerProperties.getId());
                String conflictId = "conflict";
                List<ConflictTestPojo> testPojos = new ArrayList<>();
                List<String> conflictIds = new ArrayList<>();
                Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
                // Up to 5 rounds of manufacturing a conflict, each polling the feed
                // up to 3 times, 5s apart.
                for (int j = 0; j < 5; j++) {
                    // NOTE(review): cumulative concatenation ("conflict0", "conflict01", ...) —
                    // confirm intentional.
                    conflictId = conflictId + j;
                    boolean conflictCreated = false;
                    createItemsInParallelForConflicts(containers, conflictId);
                    Thread.sleep(5000);
                    for (int i = 1; i < 4; i++) {
                        iterator =
                            containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
                        readConflicts(iterator, testPojos, conflictIds);
                        if (testPojos.size() == 0) {
                            logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
                                    "5 sec",
                                containers.get(0).getId());
                            Thread.sleep(5000);
                        } else {
                            conflictCreated = true;
                            break;
                        }
                    }
                    if (conflictCreated) {
                        break;
                    }
                    logger.error("Conflict on {} not created, retrying again",
                        containers.get(0).getId());
                }
                // One insert wins; the remaining N-1 become conflict entries.
                assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
                testPojos.clear();
                String query = String.format("SELECT * from c where c.id in (%s)",
                    Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
                CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
                options.setPartitionKey(new PartitionKey(conflictId));
                // Conflicts queryable with an explicit partition key...
                iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
                testPojos.clear();
                // ...and via the cross-partition overload.
                iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
                // Deleting each conflict entry must drain the feed.
                for (String id : conflictIds) {
                    CosmosConflictRequestOptions requestOptions =
                        new CosmosConflictRequestOptions(new PartitionKey(conflictId));
                    containers.get(0).getConflict(id).delete(requestOptions).block();
                }
                Thread.sleep(5000);
                iterator =
                    containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
                testPojos.clear();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(0);
                // The winning document remains readable after conflict cleanup.
                CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
                    new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
                assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
            } finally {
                database.getContainer(containerProperties.getId()).delete().block();
            }
        } else {
            throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
        }
    }
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
    /**
     * For each regional client, creates a throw-away document in the target container
     * (retrying up to 4 times, 5s apart, while metadata propagates) and reads it back to
     * warm connections and caches before the conflict scenarios run.
     * NOTE(review): if all 4 attempts fail, the container is silently never added to
     * asyncContainers yet the trailing readItem still executes — confirm intended.
     */
    private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
        for (CosmosAsyncClient asyncClient : this.regionalClients) {
            CosmosAsyncContainer container =
                asyncClient.getDatabase(dbId).getContainer(containerId);
            ConflictTestPojo warmUpItem = getTest();
            for (int i = 1; i <= 4; i++) {
                try {
                    container.createItem(warmUpItem).block();
                    asyncContainers.add(container);
                    break;
                } catch (CosmosException ex) {
                    // Container create may not have replicated to this region yet.
                    logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
                    Thread.sleep(5000);
                }
            }
            container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
                ConflictTestPojo.class).block();
        }
    }
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} |
done | public void afterClass() {
if (this.globalClient != null) {
this.globalClient.close();
}
for (CosmosAsyncClient asyncClient : this.regionalClients)
if (asyncClient != null) {
asyncClient.close();
}
} | asyncClient.close(); | public void afterClass() {
safeClose(this.globalClient);
for (CosmosAsyncClient asyncClient : this.regionalClients)
safeClose(asyncClient);
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(accountLocation.getEndpoint())
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
}
    /**
     * Verifies the account's DEFAULT last-writer-wins policy on the shared container:
     * concurrent inserts are auto-resolved (empty conflict feed) and a concurrent
     * replace-vs-delete resolves to the delete.
     */
    @Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
    public void conflictDefaultLWW() throws InterruptedException {
        String conflictId = "conflict";
        CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
        if (this.regionalClients.size() > 1) {
            List<CosmosAsyncContainer> containers = new ArrayList<>();
            warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
            createItemsInParallelForConflicts(containers, conflictId);
            Thread.sleep(10000);
            // Default LWW resolves automatically, so the conflict feed must be empty.
            Iterator<FeedResponse<CosmosConflictProperties>> iterator =
                containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
            List<ConflictTestPojo> testPojos = new ArrayList<>();
            readConflicts(iterator, testPojos, null);
            assertThat(testPojos.size()).isEqualTo(0);
            CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
                new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
            replaceDeleteItemInParallelForConflicts(containers, itemResponse);
            Thread.sleep(10000);
            try {
                containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
                fail("Delete should always win in conflict scenerio");
            } catch (CosmosException ex) {
                // Delete beats replace, so the subsequent read must 404.
                assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
            }
        } else {
            fail("Unable to find multi master account");
        }
    }
    /**
     * Verifies custom last-writer-wins resolution keyed on "/regionId": the writer with
     * the highest regionId wins concurrent inserts, and delete beats replace.
     */
    @Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
    public void conflictCustomLWW() throws InterruptedException {
        if (this.regionalClients.size() > 1) {
            CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
            CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
                , "/mypk");
            // Conflicts resolve to the larger value at /regionId.
            ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
                "/regionId");
            containerProperties.setConflictResolutionPolicy(resolutionPolicy);
            database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
            // Allow the container-create to propagate to all regions.
            Thread.sleep(5000);
            try {
                List<CosmosAsyncContainer> containers = new ArrayList<>();
                warmingUpClient(containers, database.getId(), containerProperties.getId());
                String conflictId = "conflict";
                createItemsInParallelForConflicts(containers, conflictId);
                Thread.sleep(10000);
                // LWW auto-resolves, so the conflict feed must be empty.
                Iterator<FeedResponse<CosmosConflictProperties>> iterator =
                    containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
                List<ConflictTestPojo> testPojos = new ArrayList<>();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(0);
                CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
                    new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
                // Highest regionId must have won the insert race.
                assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
                replaceDeleteItemInParallelForConflicts(containers, itemResponse);
                Thread.sleep(10000);
                try {
                    containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
                        ConflictTestPojo.class).block();
                    fail("Delete should always win in conflict scenerio");
                } catch (CosmosException ex) {
                    assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
                }
            } finally {
                database.getContainer(containerProperties.getId()).delete().block();
            }
        } else {
            fail("Unable to find multi master account");
        }
    }
    /**
     * Verifies custom stored-procedure conflict resolution; the policy is built from a
     * CosmosAsyncStoredProcedure reference obtained before the container exists —
     * NOTE(review): other copies use the (dbId, containerId, sprocId) overload; confirm
     * this variant is intended.
     */
    @Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
    public void conflictCustomSproc() throws InterruptedException {
        if (this.regionalClients.size() > 1) {
            CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
            String sprocId = "conflictCustomSproc";
            CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
                "/mypk");
            CosmosAsyncStoredProcedure storedProcedure =
                database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
            ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
            containerProperties.setConflictResolutionPolicy(resolutionPolicy);
            database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
            // Allow the container-create to propagate to all regions.
            Thread.sleep(5000);
            try {
                CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
                // Register the resolver sproc body loaded in before_ConflictTests.
                CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
                    sprocBody);
                asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
                List<CosmosAsyncContainer> containers = new ArrayList<>();
                warmingUpClient(containers, database.getId(), containerProperties.getId());
                String conflictId = "conflict";
                createItemsInParallelForConflicts(containers, conflictId);
                Thread.sleep(10000);
                // The sproc resolves conflicts automatically, so the feed must be empty.
                Iterator<FeedResponse<CosmosConflictProperties>> iterator =
                    containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
                List<ConflictTestPojo> testPojos = new ArrayList<>();
                readConflicts(iterator, testPojos, null);
                assertThat(testPojos.size()).isEqualTo(0);
                CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
                    new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
                // The resolver is expected to pick regionId 0 — TODO confirm sproc semantics.
                assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
                replaceDeleteItemInParallelForConflicts(containers, itemResponse);
                Thread.sleep(10000);
                try {
                    containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
                        ConflictTestPojo.class).block();
                    fail("Delete should always win in conflict scenerio");
                } catch (CosmosException ex) {
                    assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
                }
            } finally {
                database.getContainer(containerProperties.getId()).delete().block();
            }
        } else {
            fail("Unable to find multi master account");
        }
    }
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictNonExistingCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictNonExistingCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties(
"conflictNonExistingSprocContainer",
"/mypk");
CosmosAsyncStoredProcedure storedProcedure =
database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
List<ConflictTestPojo> testPojos = new ArrayList<>();
List<String> conflictIds = new ArrayList<>();
Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
for (int j = 0; j < 5; j++) {
conflictId = conflictId + j;
boolean conflictCreated = false;
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(5000);
for (int i = 1; i < 4; i++) {
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, conflictIds);
if (testPojos.size() == 0) {
logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
"5 sec",
containers.get(0).getId());
Thread.sleep(5000);
} else {
conflictCreated = true;
break;
}
}
if (conflictCreated) {
break;
}
logger.error("Conflict on {} not created, retrying again",
containers.get(0).getId());
}
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
String query = String.format("SELECT * from c where c.id in (%s)",
Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(conflictId));
iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
for (String id : conflictIds) {
CosmosConflictRequestOptions requestOptions =
new CosmosConflictRequestOptions(new PartitionKey(conflictId));
containers.get(0).getConflict(id).delete(requestOptions).block();
}
Thread.sleep(5000);
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
testPojos.clear();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
for (CosmosAsyncClient asyncClient : this.regionalClients) {
CosmosAsyncContainer container =
asyncClient.getDatabase(dbId).getContainer(containerId);
ConflictTestPojo warmUpItem = getTest();
for (int i = 1; i <= 4; i++) {
try {
container.createItem(warmUpItem).block();
asyncContainers.add(container);
break;
} catch (CosmosException ex) {
logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
Thread.sleep(5000);
}
}
container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
ConflictTestPojo.class).block();
}
}
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private static final String SKIP_SINGLE_REGION_MM_ACCOUNT = "Multi master account doesn't have multiple write " +
"regions to test this";
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
List<String> prefferedLocations = new ArrayList<>();
prefferedLocations.add(accountLocation.getName());
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.preferredRegions(prefferedLocations)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictDefaultLWW() throws InterruptedException {
String conflictId = "conflict";
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
if (this.regionalClients.size() > 1) {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomLWW() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
, "/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
"/regionId");
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
"/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
sprocBody);
asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictNonExistingCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictNonExistingCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties(
"conflictNonExistingSprocContainer",
"/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
List<ConflictTestPojo> testPojos = new ArrayList<>();
List<String> conflictIds = new ArrayList<>();
Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
for (int j = 0; j < 5; j++) {
conflictId = conflictId + j;
boolean conflictCreated = false;
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(5000);
for (int i = 1; i < 4; i++) {
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, conflictIds);
if (testPojos.size() == 0) {
logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
"5 sec",
containers.get(0).getId());
Thread.sleep(5000);
} else {
conflictCreated = true;
break;
}
}
if (conflictCreated) {
break;
}
logger.error("Conflict on {} not created, retrying again",
containers.get(0).getId());
}
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
String query = String.format("SELECT * from c where c.id in (%s)",
Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(conflictId));
iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
for (String id : conflictIds) {
CosmosConflictRequestOptions requestOptions =
new CosmosConflictRequestOptions(new PartitionKey(conflictId));
containers.get(0).getConflict(id).delete(requestOptions).block();
}
Thread.sleep(5000);
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
testPojos.clear();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
for (CosmosAsyncClient asyncClient : this.regionalClients) {
CosmosAsyncContainer container =
asyncClient.getDatabase(dbId).getContainer(containerId);
ConflictTestPojo warmUpItem = getTest();
for (int i = 1; i <= 4; i++) {
try {
container.createItem(warmUpItem).block();
asyncContainers.add(container);
break;
} catch (CosmosException ex) {
logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
Thread.sleep(5000);
}
}
container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
ConflictTestPojo.class).block();
}
}
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} |
you shouldn't pass the regional account endpoint to the CosmosClientBuilder. instead you should pass the global account endpoint and set the preferred region to be the local region of interest. | public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(accountLocation.getEndpoint())
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
} | .endpoint(accountLocation.getEndpoint()) | public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
List<String> prefferedLocations = new ArrayList<>();
prefferedLocations.add(accountLocation.getName());
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.preferredRegions(prefferedLocations)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictDefaultLWW() throws InterruptedException {
String conflictId = "conflict";
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
if (this.regionalClients.size() > 1) {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomLWW() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
, "/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
"/regionId");
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
"/mypk");
CosmosAsyncStoredProcedure storedProcedure =
database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
sprocBody);
asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictNonExistingCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictNonExistingCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties(
"conflictNonExistingSprocContainer",
"/mypk");
CosmosAsyncStoredProcedure storedProcedure =
database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
List<ConflictTestPojo> testPojos = new ArrayList<>();
List<String> conflictIds = new ArrayList<>();
Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
for (int j = 0; j < 5; j++) {
conflictId = conflictId + j;
boolean conflictCreated = false;
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(5000);
for (int i = 1; i < 4; i++) {
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, conflictIds);
if (testPojos.size() == 0) {
logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
"5 sec",
containers.get(0).getId());
Thread.sleep(5000);
} else {
conflictCreated = true;
break;
}
}
if (conflictCreated) {
break;
}
logger.error("Conflict on {} not created, retrying again",
containers.get(0).getId());
}
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
String query = String.format("SELECT * from c where c.id in (%s)",
Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(conflictId));
iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
for (String id : conflictIds) {
CosmosConflictRequestOptions requestOptions =
new CosmosConflictRequestOptions(new PartitionKey(conflictId));
containers.get(0).getConflict(id).delete(requestOptions).block();
}
Thread.sleep(5000);
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
testPojos.clear();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(this.globalClient);
for (CosmosAsyncClient asyncClient : this.regionalClients)
safeClose(asyncClient);
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
for (CosmosAsyncClient asyncClient : this.regionalClients) {
CosmosAsyncContainer container =
asyncClient.getDatabase(dbId).getContainer(containerId);
ConflictTestPojo warmUpItem = getTest();
for (int i = 1; i <= 4; i++) {
try {
container.createItem(warmUpItem).block();
asyncContainers.add(container);
break;
} catch (CosmosException ex) {
logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
Thread.sleep(5000);
}
}
container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
ConflictTestPojo.class).block();
}
}
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private static final String SKIP_SINGLE_REGION_MM_ACCOUNT = "Multi master account doesn't have multiple write " +
"regions to test this";
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictDefaultLWW() throws InterruptedException {
String conflictId = "conflict";
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
if (this.regionalClients.size() > 1) {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomLWW() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
, "/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
"/regionId");
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
"/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
sprocBody);
asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictNonExistingCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictNonExistingCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties(
"conflictNonExistingSprocContainer",
"/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
List<ConflictTestPojo> testPojos = new ArrayList<>();
List<String> conflictIds = new ArrayList<>();
Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
for (int j = 0; j < 5; j++) {
conflictId = conflictId + j;
boolean conflictCreated = false;
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(5000);
for (int i = 1; i < 4; i++) {
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, conflictIds);
if (testPojos.size() == 0) {
logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
"5 sec",
containers.get(0).getId());
Thread.sleep(5000);
} else {
conflictCreated = true;
break;
}
}
if (conflictCreated) {
break;
}
logger.error("Conflict on {} not created, retrying again",
containers.get(0).getId());
}
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
String query = String.format("SELECT * from c where c.id in (%s)",
Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(conflictId));
iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
for (String id : conflictIds) {
CosmosConflictRequestOptions requestOptions =
new CosmosConflictRequestOptions(new PartitionKey(conflictId));
containers.get(0).getConflict(id).delete(requestOptions).block();
}
Thread.sleep(5000);
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
testPojos.clear();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(this.globalClient);
for (CosmosAsyncClient asyncClient : this.regionalClients)
safeClose(asyncClient);
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
for (CosmosAsyncClient asyncClient : this.regionalClients) {
CosmosAsyncContainer container =
asyncClient.getDatabase(dbId).getContainer(containerId);
ConflictTestPojo warmUpItem = getTest();
for (int i = 1; i <= 4; i++) {
try {
container.createItem(warmUpItem).block();
asyncContainers.add(container);
break;
} catch (CosmosException ex) {
logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
Thread.sleep(5000);
}
}
container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
ConflictTestPojo.class).block();
}
}
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} |
Done | public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(accountLocation.getEndpoint())
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
} | .endpoint(accountLocation.getEndpoint()) | public void before_ConflictTests() throws Exception {
sprocBody = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("conflict-resolver-sproc"), "UTF-8");
globalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.endpointDiscoveryEnabled(false)
.directMode()
.buildAsyncClient();
GlobalEndpointManager globalEndpointManager =
ReflectionUtils.getGlobalEndpointManager((RxDocumentClientImpl) globalClient.getContextClient());
DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
Iterator<DatabaseAccountLocation> locationIterator = databaseAccount.getWritableLocations().iterator();
regionalClients = new ArrayList<>();
while (locationIterator.hasNext()) {
DatabaseAccountLocation accountLocation = locationIterator.next();
List<String> prefferedLocations = new ArrayList<>();
prefferedLocations.add(accountLocation.getName());
CosmosAsyncClient regionalClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.preferredRegions(prefferedLocations)
.directMode()
.buildAsyncClient();
regionalClients.add(regionalClient);
}
} | class CosmosConflictsTest extends TestSuiteBase {
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
private String sprocBody;
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictDefaultLWW() throws InterruptedException {
String conflictId = "conflict";
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
if (this.regionalClients.size() > 1) {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomLWW() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
, "/mypk");
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
"/regionId");
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
"/mypk");
CosmosAsyncStoredProcedure storedProcedure =
database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
sprocBody);
asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
public void conflictNonExistingCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictNonExistingCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties(
"conflictNonExistingSprocContainer",
"/mypk");
CosmosAsyncStoredProcedure storedProcedure =
database.getContainer(containerProperties.getId()).getScripts().getStoredProcedure(sprocId);
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProcedure);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
List<ConflictTestPojo> testPojos = new ArrayList<>();
List<String> conflictIds = new ArrayList<>();
Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
for (int j = 0; j < 5; j++) {
conflictId = conflictId + j;
boolean conflictCreated = false;
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(5000);
for (int i = 1; i < 4; i++) {
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, conflictIds);
if (testPojos.size() == 0) {
logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
"5 sec",
containers.get(0).getId());
Thread.sleep(5000);
} else {
conflictCreated = true;
break;
}
}
if (conflictCreated) {
break;
}
logger.error("Conflict on {} not created, retrying again",
containers.get(0).getId());
}
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
String query = String.format("SELECT * from c where c.id in (%s)",
Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(conflictId));
iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
for (String id : conflictIds) {
CosmosConflictRequestOptions requestOptions =
new CosmosConflictRequestOptions(new PartitionKey(conflictId));
containers.get(0).getConflict(id).delete(requestOptions).block();
}
Thread.sleep(5000);
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
testPojos.clear();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
} finally {
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
fail("Unable to find multi master account");
}
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(this.globalClient);
for (CosmosAsyncClient asyncClient : this.regionalClients)
safeClose(asyncClient);
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
ConflictTestPojo test) {
return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
for (CosmosAsyncClient asyncClient : this.regionalClients) {
CosmosAsyncContainer container =
asyncClient.getDatabase(dbId).getContainer(containerId);
ConflictTestPojo warmUpItem = getTest();
for (int i = 1; i <= 4; i++) {
try {
container.createItem(warmUpItem).block();
asyncContainers.add(container);
break;
} catch (CosmosException ex) {
logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
Thread.sleep(5000);
}
}
container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
ConflictTestPojo.class).block();
}
}
/**
 * Fires one writer thread per container; each thread inserts a document with
 * the same id/partition key but a regionId equal to the container's index, so
 * the concurrent cross-region writes deliberately collide.
 */
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
    for (int index = 0; index < containers.size(); index++) {
        final int regionIndex = index;
        Thread writer = new Thread(() -> {
            ConflictTestPojo document = new ConflictTestPojo();
            document.setId(conflictId);
            document.setMypk(conflictId);
            document.setRegionId(regionIndex);
            tryInsertDocumentTest(containers.get(regionIndex), document).block();
        });
        writer.start();
    }
}
/**
 * Spawns one thread per container: the first container replaces the item
 * while every other container deletes it, racing replace against delete to
 * provoke update/delete conflicts.
 */
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
                                                     CosmosItemResponse<ConflictTestPojo> itemResponse) {
    for (int index = 0; index < containers.size(); index++) {
        final int current = index;
        Runnable action = current == 0
            ? () -> tryReplaceDocumentTest(containers.get(current), itemResponse.getItem()).block()
            : () -> tryDeleteDocumentTest(containers.get(current), itemResponse.getItem().getId()).block();
        new Thread(action).start();
    }
}
} | class CosmosConflictsTest extends TestSuiteBase {
// Per-test timeout (ms); the conflict scenarios include several fixed sleeps.
private static final int CONFLICT_TIMEOUT = 120000;
private static Logger logger = LoggerFactory.getLogger(CosmosConflictsTest.class);
// Skip message used when the multi-master account has only one write region.
private static final String SKIP_SINGLE_REGION_MM_ACCOUNT = "Multi master account doesn't have multiple write " +
"regions to test this";
// Stored-procedure source for the custom conflict-resolution tests; populated elsewhere (not visible in this chunk).
private String sprocBody;
// Client spanning all regions plus one client per write region; initialised outside this view — confirm in setup.
private CosmosAsyncClient globalClient;
private List<CosmosAsyncClient> regionalClients;
@BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
/**
 * Verifies the default last-writer-wins conflict policy: concurrent inserts
 * of the same id across regions must converge with an empty conflict feed,
 * and a concurrent replace/delete race must resolve to the delete (a later
 * read returns 404). Skips when the account has a single write region.
 *
 * @throws InterruptedException if a replication-settling sleep is interrupted
 */
public void conflictDefaultLWW() throws InterruptedException {
    String conflictId = "conflict";
    CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(globalClient);
    if (this.regionalClients.size() > 1) {
        List<CosmosAsyncContainer> containers = new ArrayList<>();
        warmingUpClient(containers, asyncContainer.getDatabase().getId(), asyncContainer.getId());
        createItemsInParallelForConflicts(containers, conflictId);
        Thread.sleep(10000); // allow cross-region replication to settle
        Iterator<FeedResponse<CosmosConflictProperties>> iterator =
            containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
        List<ConflictTestPojo> testPojos = new ArrayList<>();
        readConflicts(iterator, testPojos, null);
        // Default LWW resolves insert conflicts silently, so the feed must be empty.
        assertThat(testPojos.size()).isEqualTo(0);
        CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
            new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
        replaceDeleteItemInParallelForConflicts(containers, itemResponse);
        Thread.sleep(10000); // allow the replace/delete race to replicate
        try {
            containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
            // Fixed message typo: "scenerio" -> "scenario".
            fail("Delete should always win in conflict scenario");
        } catch (CosmosException ex) {
            assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
        }
    } else {
        throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
    }
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
// Verifies a custom last-writer-wins policy keyed on "/regionId": the insert
// conflict must resolve silently in favour of the highest regionId, and a
// replace/delete race must still resolve to the delete. Skips on accounts
// with a single write region.
public void conflictCustomLWW() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictCustomLWWContainer"
, "/mypk");
// Resolve write conflicts by comparing the documents' "/regionId" values.
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(
"/regionId");
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
// Give the new container time to replicate to all regions.
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
// Custom LWW resolves automatically, so no conflict records should remain.
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
// The writer with the largest "/regionId" (the last container's index) must have won.
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(containers.size() - 1);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
// Clean up the per-test container regardless of the outcome.
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
// Verifies a custom conflict-resolution policy backed by an existing stored
// procedure (sprocBody): the sproc resolves the insert conflict — region 0's
// document is expected to win — and a replace/delete race still resolves to
// the delete. Skips on accounts with a single write region.
public void conflictCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties("conflictSprocContainer",
"/mypk");
// Route conflicts to the (soon to be created) stored procedure.
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
// Give the new container time to replicate to all regions.
Thread.sleep(5000);
try {
CosmosAsyncContainer asyncContainer = database.getContainer(containerProperties.getId());
CosmosStoredProcedureProperties procedureProperties = new CosmosStoredProcedureProperties(sprocId,
sprocBody);
asyncContainer.getScripts().createStoredProcedure(procedureProperties).block();
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(10000);
Iterator<FeedResponse<CosmosConflictProperties>> iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
List<ConflictTestPojo> testPojos = new ArrayList<>();
readConflicts(iterator, testPojos, null);
// The sproc resolves conflicts automatically, so the feed must be empty.
assertThat(testPojos.size()).isEqualTo(0);
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
// The surviving document carries regionId 0 — presumably the sproc picks the
// lowest regionId; confirm against sprocBody, which is not visible here.
assertThat(itemResponse.getItem().getRegionId()).isEqualTo(0);
replaceDeleteItemInParallelForConflicts(containers, itemResponse);
Thread.sleep(10000);
try {
containers.get(0).readItem(conflictId, new PartitionKey(conflictId), null,
ConflictTestPojo.class).block();
fail("Delete should always win in conflict scenerio");
} catch (CosmosException ex) {
assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
}
} finally {
// Clean up the per-test container regardless of the outcome.
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@Test(groups = {"multi-master"}, timeOut = CONFLICT_TIMEOUT)
// Verifies behaviour when the custom policy points at a stored procedure that
// is never created: conflicts are NOT auto-resolved, so they surface in the
// conflict feed, can be queried, and can be deleted manually — after which
// the document itself remains readable. Skips on single-write-region accounts.
public void conflictNonExistingCustomSproc() throws InterruptedException {
if (this.regionalClients.size() > 1) {
CosmosAsyncDatabase database = getSharedCosmosDatabase(globalClient);
String sprocId = "conflictNonExistingCustomSproc";
CosmosContainerProperties containerProperties = new CosmosContainerProperties(
"conflictNonExistingSprocContainer",
"/mypk");
// The sproc with this id is deliberately never created.
ConflictResolutionPolicy resolutionPolicy = ConflictResolutionPolicy.createCustomPolicy(database.getId(), containerProperties.getId(), sprocId);
containerProperties.setConflictResolutionPolicy(resolutionPolicy);
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(400)).block();
// Give the new container time to replicate to all regions.
Thread.sleep(5000);
try {
List<CosmosAsyncContainer> containers = new ArrayList<>();
warmingUpClient(containers, database.getId(), containerProperties.getId());
String conflictId = "conflict";
List<ConflictTestPojo> testPojos = new ArrayList<>();
List<String> conflictIds = new ArrayList<>();
Iterator<FeedResponse<CosmosConflictProperties>> iterator = null;
// Outer loop: up to 5 attempts to actually provoke a conflict (the racing
// writers do not always collide); each attempt uses a new id suffix.
for (int j = 0; j < 5; j++) {
conflictId = conflictId + j;
boolean conflictCreated = false;
createItemsInParallelForConflicts(containers, conflictId);
Thread.sleep(5000);
// Inner loop: up to 3 polls of the conflict feed, waiting for replication.
for (int i = 1; i < 4; i++) {
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, conflictIds);
if (testPojos.size() == 0) {
logger.error("Conflict on {} insert operation has not reflected yet, retrying read after " +
"5 sec",
containers.get(0).getId());
Thread.sleep(5000);
} else {
conflictCreated = true;
break;
}
}
if (conflictCreated) {
break;
}
logger.error("Conflict on {} not created, retrying again",
containers.get(0).getId());
}
// One insert wins; each of the remaining writers produces one conflict record.
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
// The same conflicts must be reachable through queryConflicts as well,
// both with and without an explicit partition key.
String query = String.format("SELECT * from c where c.id in (%s)",
Strings.join(conflictIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(","));
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(conflictId));
iterator = containers.get(0).queryConflicts(query, options).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
testPojos.clear();
iterator = containers.get(0).queryConflicts(query).byPage().toIterable().iterator();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(containers.size() - 1);
// Manually delete every conflict record; the feed must then drain to empty.
for (String id : conflictIds) {
CosmosConflictRequestOptions requestOptions =
new CosmosConflictRequestOptions(new PartitionKey(conflictId));
containers.get(0).getConflict(id).delete(requestOptions).block();
}
Thread.sleep(5000);
iterator =
containers.get(0).readAllConflicts(new CosmosQueryRequestOptions()).byPage().toIterable().iterator();
testPojos.clear();
readConflicts(iterator, testPojos, null);
assertThat(testPojos.size()).isEqualTo(0);
// Deleting conflict records must not delete the winning document itself.
CosmosItemResponse<ConflictTestPojo> itemResponse = containers.get(0).readItem(conflictId,
new PartitionKey(conflictId), null, ConflictTestPojo.class).block();
assertThat(itemResponse.getItem().getId()).isEqualTo(conflictId);
} finally {
// Clean up the per-test container regardless of the outcome.
database.getContainer(containerProperties.getId()).delete().block();
}
} else {
throw new SkipException(SKIP_SINGLE_REGION_MM_ACCOUNT);
}
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
/**
 * Closes the global client and every regional client after the suite; the
 * annotation above runs this even when setup failed.
 */
public void afterClass() {
    safeClose(this.globalClient);
    // Braces added: an unbraced loop body is a classic maintenance hazard.
    for (CosmosAsyncClient asyncClient : this.regionalClients) {
        safeClose(asyncClient);
    }
}
/**
 * Inserts {@code test}, absorbing an HTTP 409 (the insert lost to a
 * concurrent write of the same id) by completing empty; every other error is
 * propagated unchanged.
 */
private Mono<CosmosItemResponse<ConflictTestPojo>> tryInsertDocumentTest(CosmosAsyncContainer container,
                                                                         ConflictTestPojo test) {
    return container.createItem(test, new PartitionKey(test.getId()), new CosmosItemRequestOptions())
        .onErrorResume(error -> hasCosmosConflictException(error, 409)
            ? Mono.empty()
            : Mono.error(error));
}
/**
 * Replaces the stored copy of {@code test}, absorbing an HTTP 409 (the
 * replace lost a concurrent-write race) by completing empty; every other
 * error is propagated unchanged.
 */
private Mono<CosmosItemResponse<ConflictTestPojo>> tryReplaceDocumentTest(CosmosAsyncContainer container,
                                                                          ConflictTestPojo test) {
    return container.replaceItem(test, test.getId(), new PartitionKey(test.getId()), new CosmosItemRequestOptions())
        .onErrorResume(error -> hasCosmosConflictException(error, 409)
            ? Mono.empty()
            : Mono.error(error));
}
// Deletes the given id (also the partition-key value), treating HTTP 409 as a
// benign race by completing empty; any other error is propagated.
private Mono<CosmosItemResponse<Object>> tryDeleteDocumentTest(CosmosAsyncContainer container, String id) {
return container.deleteItem(id, new PartitionKey(id), new CosmosItemRequestOptions())
.onErrorResume(e -> {
if (hasCosmosConflictException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
});
}
// True when the throwable is a CosmosException carrying exactly this HTTP status code.
private boolean hasCosmosConflictException(Throwable e, int statusCode) {
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
return cosmosException.getStatusCode() == statusCode;
}
return false;
}
// Drains every page of the conflict feed into pojoList; when conflictIds is
// non-null, also records each conflict's id.
private void readConflicts(Iterator<FeedResponse<CosmosConflictProperties>> iterator,
List<ConflictTestPojo> pojoList,
List<String> conflictIds) {
while (iterator.hasNext()) {
for (CosmosConflictProperties conflict : iterator.next().getResults()) {
pojoList.add(conflict.getItem(ConflictTestPojo.class));
if (conflictIds != null) {
conflictIds.add(conflict.getId());
}
}
}
}
// Builds a test document whose id and partition key share one random UUID,
// with regionId initialised to 0.
private ConflictTestPojo getTest() {
ConflictTestPojo test = new ConflictTestPojo();
String uuid = UUID.randomUUID().toString();
test.setId(uuid);
test.setMypk(uuid);
test.setRegionId(0);
return test;
}
// Warms up one container per regional client: retries the create up to four
// times (a new container may not have replicated yet), then reads the item
// back so later operations are not cold starts.
// NOTE(review): the 5s back-off also runs after the final failed attempt, and
// the readItem below executes even when every create attempt failed.
private void warmingUpClient(List<CosmosAsyncContainer> asyncContainers, String dbId, String containerId) throws InterruptedException {
for (CosmosAsyncClient asyncClient : this.regionalClients) {
CosmosAsyncContainer container =
asyncClient.getDatabase(dbId).getContainer(containerId);
ConflictTestPojo warmUpItem = getTest();
for (int i = 1; i <= 4; i++) {
try {
container.createItem(warmUpItem).block();
// Only containers whose warm-up create succeeded are collected.
asyncContainers.add(container);
break;
} catch (CosmosException ex) {
logger.error("Container {} create has not reflected yet, retrying after 5 sec", containerId);
Thread.sleep(5000);
}
}
container.readItem(warmUpItem.getId(), new PartitionKey(warmUpItem.getId()), null,
ConflictTestPojo.class).block();
}
}
// Fires one writer thread per container, each inserting a document with the
// same id/partition key but a regionId equal to its container's index, so the
// concurrent cross-region writes deliberately collide.
private void createItemsInParallelForConflicts(List<CosmosAsyncContainer> containers, String conflictId) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
new Thread(() -> {
ConflictTestPojo conflictObject = new ConflictTestPojo();
conflictObject.setId(conflictId);
conflictObject.setMypk(conflictId);
conflictObject.setRegionId(finalI);
tryInsertDocumentTest(containers.get(finalI), conflictObject).block();
}).start();
}
}
// Spawns one thread per container: the first replaces the item while every
// other deletes it, racing replace against delete to provoke conflicts.
private void replaceDeleteItemInParallelForConflicts(List<CosmosAsyncContainer> containers,
CosmosItemResponse<ConflictTestPojo> itemResponse) {
for (int i = 0; i < containers.size(); i++) {
int finalI = i;
if (i == 0) {
new Thread(() -> {
tryReplaceDocumentTest(containers.get(finalI), itemResponse.getItem()).block();
}).start();
} else {
new Thread(() -> {
tryDeleteDocumentTest(containers.get(finalI), itemResponse.getItem().getId()).block();
}).start();
}
}
}
} |
We have a [SimpleTokenCache](https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/core/azure-core/src/main/java/com/azure/core/credential/SimpleTokenCache.java) that can be used here. It refreshes the token when it expires and also ensures that the refresh happens only once after each expiration period. | public Mono<AccessToken> getToken() throws InterruptedException, ExecutionException {
if (isClosed) {
throw logger.logExceptionAsError(
new RuntimeException("getToken called on closed CommunicationTokenCredential object"));
}
synchronized (this) {
if ((accessToken == null || accessToken.isExpired()) && refresher != null) {
return fetchFreshToken()
.map(token -> {
accessToken = tokenParser.parseJWTToken(token);
return accessToken;
});
}
return Mono.just(accessToken);
}
} | return Mono.just(accessToken); | public Mono<AccessToken> getToken() throws InterruptedException, ExecutionException {
if (isClosed) {
return FluxUtil.monoError(logger,
new RuntimeException("getToken called on closed CommunicationTokenCredential object"));
}
if ((accessToken == null || accessToken.isExpired()) && refresher != null) {
synchronized (this) {
if ((accessToken == null || accessToken.isExpired()) && refresher != null) {
return fetchFreshToken()
.map(token -> {
accessToken = tokenParser.parseJWTToken(token);
return accessToken;
});
}
}
}
return Mono.just(accessToken);
} | class CommunicationTokenCredential implements AutoCloseable {
// Minutes before token expiry at which a proactive refresh is scheduled.
private static final int DEFAULT_EXPIRING_OFFSET_MINUTES = 10;
private final ClientLogger logger = new ClientLogger(CommunicationTokenCredential.class);
// Most recently parsed token; replaced by setToken on every refresh.
private AccessToken accessToken;
private final TokenParser tokenParser = new TokenParser();
// Supplies fresh tokens on demand; null when constructed from a static token only.
private TokenRefresher refresher;
// Timer-driven proactive refresh; non-null only when refreshProactively was requested.
private FetchingTask fetchingTask;
// Set by close(); guards against use after disposal.
private boolean isClosed = false;
/**
 * Creates a credential from an already-issued serialized JWT token. No
 * refresher is configured, so the token is never renewed.
 *
 * @param initialToken serialized JWT token; must not be null
 */
public CommunicationTokenCredential(String initialToken) {
Objects.requireNonNull(initialToken, "'initialToken' cannot be null.");
setToken(initialToken);
}
/**
 * Creates a credential that obtains tokens on demand via the given refresher.
 *
 * @param tokenRefresher implementation to supply a fresh token when requested;
 *                       must not be null
 */
public CommunicationTokenCredential(TokenRefresher tokenRefresher) {
Objects.requireNonNull(tokenRefresher, "'tokenRefresher' cannot be null.");
refresher = tokenRefresher;
}
/**
 * Creates a credential from an initial serialized JWT token plus a refresher.
 * When {@code refreshProactively} is true, a timer is scheduled to invoke the
 * refresher {@value #DEFAULT_EXPIRING_OFFSET_MINUTES} minutes before the
 * current token expires.
 * <p>
 * NOTE(review): the previous javadoc claimed a two-minute offset and a
 * {@code setCallbackOffsetMinutes} method; the constant used here is 10 and
 * no such setter is visible in this class — confirm against the public API.
 *
 * @param tokenRefresher     implementation to supply a fresh token when requested
 * @param initialToken       serialized JWT token; must not be null
 * @param refreshProactively when true, refresh proactively ahead of expiry
 */
public CommunicationTokenCredential(TokenRefresher tokenRefresher, String initialToken,
boolean refreshProactively) {
this(tokenRefresher);
Objects.requireNonNull(initialToken, "'initialToken' cannot be null.");
setToken(initialToken);
if (refreshProactively) {
OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES);
fetchingTask = new FetchingTask(this, nextFetchTime);
}
}
/**
 * Marks the credential closed, cancels any scheduled proactive refresh, and
 * drops the refresher reference; {@code isClosed} is consulted by the token
 * retrieval path elsewhere in this class.
 * <p>
 * NOTE(review): the javadoc previously attached here described a getToken
 * method (returns/throws for token fetching) and did not match this method;
 * it appears to have been detached from its original method — rewritten to
 * document close().
 *
 * @throws IOException declared by the signature; no I/O is visible in this body
 */
@Override
public void close() throws IOException {
isClosed = true;
if (fetchingTask != null) {
fetchingTask.stopTimer();
fetchingTask = null;
}
refresher = null;
}
// True when a proactive refresh timer is currently configured
// (package-private — presumably a test hook; confirm with callers).
boolean hasProactiveFetcher() {
return fetchingTask != null;
}
// Parses and stores the fresh token; when proactive fetching is active, the
// timer is rescheduled to fire DEFAULT_EXPIRING_OFFSET_MINUTES before the new
// token's expiry.
private void setToken(String freshToken) {
accessToken = tokenParser.parseJWTToken(freshToken);
if (fetchingTask != null) {
OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES);
fetchingTask.setNextFetchTime(nextFetchTime);
}
}
/**
 * Asks the configured refresher for a fresh serialized token.
 * <p>
 * A null Mono from the refresher is a contract violation; it is surfaced as a
 * failed Mono rather than a synchronous throw, so reactive callers observe
 * the failure through the normal onError path instead of an exception
 * escaping at assembly time.
 *
 * @return mono emitting the fresh serialized token, or an error mono when the
 *         refresher returned null
 */
private Mono<String> fetchFreshToken() {
    Mono<String> tokenAsync = refresher.getTokenAsync();
    if (tokenAsync == null) {
        // Log-and-return the failure reactively; do not throw from a
        // Mono-returning method.
        return Mono.error(logger.logExceptionAsError(
            new RuntimeException("TokenRefresher returned null when getTokenAsync is called")));
    }
    return tokenAsync;
}
/**
 * Timer-backed proactive refresher: fires once at {@code nextFetchTime},
 * pulls a fresh token from the host credential, and stores it via the host's
 * setToken — which in turn reschedules this task through setNextFetchTime.
 */
private static class FetchingTask {
private final CommunicationTokenCredential host;
// Current one-shot timer; null whenever no refresh is armed.
private Timer expiringTimer;
private OffsetDateTime nextFetchTime;
FetchingTask(CommunicationTokenCredential tokenHost,
OffsetDateTime nextFetchAt) {
host = tokenHost;
nextFetchTime = nextFetchAt;
startTimer();
}
// Re-arms the timer for a new fire time; synchronized so cancel/schedule
// pairs are not interleaved with another reschedule or stop.
private synchronized void setNextFetchTime(OffsetDateTime newFetchTime) {
nextFetchTime = newFetchTime;
stopTimer();
startTimer();
}
private synchronized void startTimer() {
expiringTimer = new Timer();
Date expiring = Date.from(nextFetchTime.toInstant());
expiringTimer.schedule(new TokenExpiringTask(this), expiring);
}
// Safe to call when no timer is armed (e.g. from the credential's close()).
private synchronized void stopTimer() {
if (expiringTimer == null) {
return;
}
expiringTimer.cancel();
expiringTimer.purge();
expiringTimer = null;
}
// Delegates to the host so the nested task never touches the credential directly.
private Mono<String> fetchFreshToken() {
return host.fetchFreshToken();
}
private void setToken(String freshTokenString) {
host.setToken(freshTokenString);
}
// One-shot TimerTask: blocks on the refresher's Mono and hands the token
// back to the credential; failures are logged, never rethrown into the timer thread.
private class TokenExpiringTask extends TimerTask {
private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class);
private final FetchingTask tokenCache;
TokenExpiringTask(FetchingTask host) {
tokenCache = host;
}
@Override
public void run() {
try {
Mono<String> tokenAsync = tokenCache.fetchFreshToken();
tokenCache.setToken(tokenAsync.block());
} catch (Exception exception) {
logger.logExceptionAsError(new RuntimeException(exception));
}
}
}
}
} | class CommunicationTokenCredential implements AutoCloseable {
private static final int DEFAULT_EXPIRING_OFFSET_MINUTES = 10;
private final ClientLogger logger = new ClientLogger(CommunicationTokenCredential.class);
private AccessToken accessToken;
private final TokenParser tokenParser = new TokenParser();
private TokenRefresher refresher;
private FetchingTask fetchingTask;
private boolean isClosed = false;
/**
* Create with serialized JWT token
*
* @param initialToken serialized JWT token
*/
public CommunicationTokenCredential(String initialToken) {
Objects.requireNonNull(initialToken, "'initialToken' cannot be null.");
setToken(initialToken);
}
/**
* Create with a tokenRefresher
*
* @param tokenRefresher implementation to supply fresh token when reqested
*/
public CommunicationTokenCredential(TokenRefresher tokenRefresher) {
Objects.requireNonNull(tokenRefresher, "'tokenRefresher' cannot be null.");
refresher = tokenRefresher;
}
/**
* Create with serialized JWT token and a token supplier to auto-refresh the
* token before it expires. Callback function tokenRefresher will be called
* ahead of the token expiry by the number of minutes specified by
* CallbackOffsetMinutes defaulted to two minutes. To modify this default, call
* setCallbackOffsetMinutes after construction
*
* @param tokenRefresher implementation to supply fresh token when reqested
* @param initialToken serialized JWT token
* @param refreshProactively when set to true, turn on proactive fetching to call
* tokenRefresher before token expiry by minutes set
* with setCallbackOffsetMinutes or default value of
* two minutes
*/
public CommunicationTokenCredential(TokenRefresher tokenRefresher, String initialToken,
boolean refreshProactively) {
this(tokenRefresher);
Objects.requireNonNull(initialToken, "'initialToken' cannot be null.");
setToken(initialToken);
if (refreshProactively) {
OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES);
fetchingTask = new FetchingTask(this, nextFetchTime);
}
}
/**
* Get Azure core access token from credential
*
* @return Asynchronous call to fetch actual token
* @throws ExecutionException when supplier throws this exception
* @throws InterruptedException when supplier throws this exception
*/
@Override
public void close() throws IOException {
isClosed = true;
if (fetchingTask != null) {
fetchingTask.stopTimer();
fetchingTask = null;
}
refresher = null;
}
boolean hasProactiveFetcher() {
return fetchingTask != null;
}
private void setToken(String freshToken) {
accessToken = tokenParser.parseJWTToken(freshToken);
if (fetchingTask != null) {
OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES);
fetchingTask.setNextFetchTime(nextFetchTime);
}
}
private Mono<String> fetchFreshToken() {
Mono<String> tokenAsync = refresher.getTokenAsync();
if (tokenAsync == null) {
return FluxUtil.monoError(logger,
new RuntimeException("TokenRefresher returned null when getTokenAsync is called"));
}
return tokenAsync;
}
private static class FetchingTask {
private final CommunicationTokenCredential host;
private Timer expiringTimer;
private OffsetDateTime nextFetchTime;
FetchingTask(CommunicationTokenCredential tokenHost,
OffsetDateTime nextFetchAt) {
host = tokenHost;
nextFetchTime = nextFetchAt;
startTimer();
}
private synchronized void setNextFetchTime(OffsetDateTime newFetchTime) {
nextFetchTime = newFetchTime;
stopTimer();
startTimer();
}
private synchronized void startTimer() {
expiringTimer = new Timer();
Date expiring = Date.from(nextFetchTime.toInstant());
expiringTimer.schedule(new TokenExpiringTask(this), expiring);
}
private synchronized void stopTimer() {
if (expiringTimer == null) {
return;
}
expiringTimer.cancel();
expiringTimer.purge();
expiringTimer = null;
}
private Mono<String> fetchFreshToken() {
return host.fetchFreshToken();
}
private void setToken(String freshTokenString) {
host.setToken(freshTokenString);
}
private class TokenExpiringTask extends TimerTask {
private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class);
private final FetchingTask tokenCache;
TokenExpiringTask(FetchingTask host) {
tokenCache = host;
}
@Override
public void run() {
try {
Mono<String> tokenAsync = tokenCache.fetchFreshToken();
tokenCache.setToken(tokenAsync.block());
} catch (Exception exception) {
logger.logExceptionAsError(new RuntimeException(exception));
}
}
}
}
} |
As discussed, return a Mono.error() here instead of throwing an exception. | public Mono<AccessToken> getToken() throws InterruptedException, ExecutionException {
if (isClosed) {
throw logger.logExceptionAsError(
new RuntimeException("getToken called on closed CommunicationTokenCredential object"));
}
if ((accessToken == null || accessToken.isExpired()) && refresher != null) {
synchronized (this) {
if ((accessToken == null || accessToken.isExpired()) && refresher != null) {
return fetchFreshToken()
.map(token -> {
accessToken = tokenParser.parseJWTToken(token);
return accessToken;
});
}
}
}
return Mono.just(accessToken);
} | } | public Mono<AccessToken> getToken() throws InterruptedException, ExecutionException {
if (isClosed) {
return FluxUtil.monoError(logger,
new RuntimeException("getToken called on closed CommunicationTokenCredential object"));
}
if ((accessToken == null || accessToken.isExpired()) && refresher != null) {
synchronized (this) {
if ((accessToken == null || accessToken.isExpired()) && refresher != null) {
return fetchFreshToken()
.map(token -> {
accessToken = tokenParser.parseJWTToken(token);
return accessToken;
});
}
}
}
return Mono.just(accessToken);
} | class CommunicationTokenCredential implements AutoCloseable {
private static final int DEFAULT_EXPIRING_OFFSET_MINUTES = 10;
private final ClientLogger logger = new ClientLogger(CommunicationTokenCredential.class);
private AccessToken accessToken;
private final TokenParser tokenParser = new TokenParser();
private TokenRefresher refresher;
private FetchingTask fetchingTask;
private boolean isClosed = false;
/**
* Create with serialized JWT token
*
* @param initialToken serialized JWT token
*/
public CommunicationTokenCredential(String initialToken) {
Objects.requireNonNull(initialToken, "'initialToken' cannot be null.");
setToken(initialToken);
}
/**
* Create with a tokenRefresher
*
* @param tokenRefresher implementation to supply fresh token when reqested
*/
public CommunicationTokenCredential(TokenRefresher tokenRefresher) {
Objects.requireNonNull(tokenRefresher, "'tokenRefresher' cannot be null.");
refresher = tokenRefresher;
}
/**
* Create with serialized JWT token and a token supplier to auto-refresh the
* token before it expires. Callback function tokenRefresher will be called
* ahead of the token expiry by the number of minutes specified by
* CallbackOffsetMinutes defaulted to two minutes. To modify this default, call
* setCallbackOffsetMinutes after construction
*
* @param tokenRefresher implementation to supply fresh token when reqested
* @param initialToken serialized JWT token
* @param refreshProactively when set to true, turn on proactive fetching to call
* tokenRefresher before token expiry by minutes set
* with setCallbackOffsetMinutes or default value of
* two minutes
*/
public CommunicationTokenCredential(TokenRefresher tokenRefresher, String initialToken,
boolean refreshProactively) {
this(tokenRefresher);
Objects.requireNonNull(initialToken, "'initialToken' cannot be null.");
setToken(initialToken);
if (refreshProactively) {
OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES);
fetchingTask = new FetchingTask(this, nextFetchTime);
}
}
/**
* Get Azure core access token from credential
*
* @return Asynchronous call to fetch actual token
* @throws ExecutionException when supplier throws this exception
* @throws InterruptedException when supplier throws this exception
*/
@Override
public void close() throws IOException {
isClosed = true;
if (fetchingTask != null) {
fetchingTask.stopTimer();
fetchingTask = null;
}
refresher = null;
}
boolean hasProactiveFetcher() {
return fetchingTask != null;
}
private void setToken(String freshToken) {
accessToken = tokenParser.parseJWTToken(freshToken);
if (fetchingTask != null) {
OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES);
fetchingTask.setNextFetchTime(nextFetchTime);
}
}
private Mono<String> fetchFreshToken() {
Mono<String> tokenAsync = refresher.getTokenAsync();
if (tokenAsync == null) {
throw logger.logExceptionAsError(
new RuntimeException("TokenRefresher returned null when getTokenAsync is called"));
}
return tokenAsync;
}
private static class FetchingTask {
private final CommunicationTokenCredential host;
private Timer expiringTimer;
private OffsetDateTime nextFetchTime;
FetchingTask(CommunicationTokenCredential tokenHost,
OffsetDateTime nextFetchAt) {
host = tokenHost;
nextFetchTime = nextFetchAt;
startTimer();
}
private synchronized void setNextFetchTime(OffsetDateTime newFetchTime) {
nextFetchTime = newFetchTime;
stopTimer();
startTimer();
}
private synchronized void startTimer() {
expiringTimer = new Timer();
Date expiring = Date.from(nextFetchTime.toInstant());
expiringTimer.schedule(new TokenExpiringTask(this), expiring);
}
private synchronized void stopTimer() {
if (expiringTimer == null) {
return;
}
expiringTimer.cancel();
expiringTimer.purge();
expiringTimer = null;
}
private Mono<String> fetchFreshToken() {
return host.fetchFreshToken();
}
private void setToken(String freshTokenString) {
host.setToken(freshTokenString);
}
private class TokenExpiringTask extends TimerTask {
private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class);
private final FetchingTask tokenCache;
TokenExpiringTask(FetchingTask host) {
tokenCache = host;
}
@Override
public void run() {
try {
Mono<String> tokenAsync = tokenCache.fetchFreshToken();
tokenCache.setToken(tokenAsync.block());
} catch (Exception exception) {
logger.logExceptionAsError(new RuntimeException(exception));
}
}
}
}
} | class CommunicationTokenCredential implements AutoCloseable {
private static final int DEFAULT_EXPIRING_OFFSET_MINUTES = 10;
private final ClientLogger logger = new ClientLogger(CommunicationTokenCredential.class);
private AccessToken accessToken;
private final TokenParser tokenParser = new TokenParser();
private TokenRefresher refresher;
private FetchingTask fetchingTask;
private boolean isClosed = false;
/**
* Create with serialized JWT token
*
* @param initialToken serialized JWT token
*/
public CommunicationTokenCredential(String initialToken) {
Objects.requireNonNull(initialToken, "'initialToken' cannot be null.");
setToken(initialToken);
}
/**
* Create with a tokenRefresher
*
* @param tokenRefresher implementation to supply fresh token when reqested
*/
public CommunicationTokenCredential(TokenRefresher tokenRefresher) {
Objects.requireNonNull(tokenRefresher, "'tokenRefresher' cannot be null.");
refresher = tokenRefresher;
}
/**
* Create with serialized JWT token and a token supplier to auto-refresh the
* token before it expires. Callback function tokenRefresher will be called
* ahead of the token expiry by the number of minutes specified by
* CallbackOffsetMinutes defaulted to two minutes. To modify this default, call
* setCallbackOffsetMinutes after construction
*
* @param tokenRefresher implementation to supply fresh token when reqested
* @param initialToken serialized JWT token
* @param refreshProactively when set to true, turn on proactive fetching to call
* tokenRefresher before token expiry by minutes set
* with setCallbackOffsetMinutes or default value of
* two minutes
*/
public CommunicationTokenCredential(TokenRefresher tokenRefresher, String initialToken,
boolean refreshProactively) {
this(tokenRefresher);
Objects.requireNonNull(initialToken, "'initialToken' cannot be null.");
setToken(initialToken);
if (refreshProactively) {
OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES);
fetchingTask = new FetchingTask(this, nextFetchTime);
}
}
/**
 * Disposes this credential: marks it closed, cancels any scheduled proactive
 * refresh task, and releases the token refresher.
 * NOTE(review): the Javadoc previously attached here ("Get Azure core access
 * token from credential", with {@code @return}/{@code @throws ExecutionException})
 * described the token getter, not close(); it was misplaced.
 *
 * @throws IOException declared by {@code Closeable#close()}; not thrown by this implementation
 */
@Override
public void close() throws IOException {
    isClosed = true;
    if (fetchingTask != null) {
        fetchingTask.stopTimer();
        fetchingTask = null;
    }
    refresher = null;
}
// Package-private hook: reports whether a proactive refresh task is currently scheduled.
boolean hasProactiveFetcher() {
    return this.fetchingTask != null;
}
/**
 * Parses and stores a freshly obtained serialized token. When proactive
 * refreshing is active, re-arms the fetch timer relative to the new expiry.
 */
private void setToken(String freshToken) {
    this.accessToken = tokenParser.parseJWTToken(freshToken);
    if (fetchingTask == null) {
        return;
    }
    // Re-arm ahead of the new expiry by the default offset.
    fetchingTask.setNextFetchTime(
        accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES));
}
/**
 * Asks the refresher for a new serialized token, surfacing a logged error
 * when the refresher contract is violated by returning a null Mono.
 */
private Mono<String> fetchFreshToken() {
    final Mono<String> refreshed = refresher.getTokenAsync();
    return refreshed != null
        ? refreshed
        : FluxUtil.monoError(logger,
            new RuntimeException("TokenRefresher returned null when getTokenAsync is called"));
}
/**
 * Drives proactive token refresh: a {@link Timer} fires at {@code nextFetchTime},
 * pulls a fresh token through the owning credential's refresher and stores it
 * back, which re-arms this task via setToken -> setNextFetchTime.
 */
private static class FetchingTask {
    private final CommunicationTokenCredential host;
    private Timer expiringTimer;
    private OffsetDateTime nextFetchTime;
    FetchingTask(CommunicationTokenCredential tokenHost,
        OffsetDateTime nextFetchAt) {
        host = tokenHost;
        nextFetchTime = nextFetchAt;
        startTimer();
    }
    // Synchronized so a refresh completing on the timer thread cannot interleave
    // with an external stop/start of the timer.
    private synchronized void setNextFetchTime(OffsetDateTime newFetchTime) {
        nextFetchTime = newFetchTime;
        stopTimer();
        startTimer();
    }
    private synchronized void startTimer() {
        expiringTimer = new Timer();
        Date expiring = Date.from(nextFetchTime.toInstant());
        expiringTimer.schedule(new TokenExpiringTask(this), expiring);
    }
    // Idempotent: no-op when no timer is armed.
    private synchronized void stopTimer() {
        if (expiringTimer == null) {
            return;
        }
        expiringTimer.cancel();
        expiringTimer.purge();
        expiringTimer = null;
    }
    private Mono<String> fetchFreshToken() {
        return host.fetchFreshToken();
    }
    private void setToken(String freshTokenString) {
        host.setToken(freshTokenString);
    }
    /** Timer callback: fetches a fresh token and hands it back to the credential. */
    private class TokenExpiringTask extends TimerTask {
        private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class);
        private final FetchingTask tokenCache;
        TokenExpiringTask(FetchingTask host) {
            tokenCache = host;
        }
        @Override
        public void run() {
            try {
                // Blocking is acceptable here: this runs on the Timer's background thread.
                Mono<String> tokenAsync = tokenCache.fetchFreshToken();
                tokenCache.setToken(tokenAsync.block());
            } catch (Exception exception) {
                // Log and swallow so one failed refresh does not kill the Timer thread.
                logger.logExceptionAsError(new RuntimeException(exception));
            }
        }
    }
}
} |
/**
 * Populates the common request headers (x-date, authorization, content-type, accept)
 * and, when the request requires feed-range filtering, lets the request's feed range
 * populate the filtering headers before resolving the authorization header.
 *
 * NOTE(review): a previous revision additionally intersected the request's
 * continuation range with the effective range of the feed range here. The feed
 * range obtained via {@code request.getFeedRange()} already carries the
 * information needed for filtering, so that continuation-range handling was
 * removed as redundant. This also collapses the duplicated definition of this
 * method present in the extracted chunk.
 *
 * @param request the service request to decorate
 * @param httpMethod the HTTP verb, used both for header defaults and for computing the auth token
 * @return a Mono emitting the decorated request
 */
private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token is URL-encoded before being placed in the header.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    // POST/PUT payloads default to JSON; PATCH uses the JSON-patch media type.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (RequestVerb.PATCH.equals(httpMethod) &&
        !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
    if (this.requiresFeedRangeFiltering(request)) {
        return request.getFeedRange()
            .populateFeedRangeFilteringHeaders(
                this.getPartitionKeyRangeCache(),
                request,
                this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
            .flatMap(this::populateAuthorizationHeader);
    }
    return this.populateAuthorizationHeader(request);
}

class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// ---- static bookkeeping shared across client instances ----
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
// NOTE(review): consumed via getAndDecrement() in the constructor, producing 0, -1, -2, ... — confirm intended.
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Full EPK range: [min inclusive, max exclusive).
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
    PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
    PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
    "ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// ---- immutable configuration captured at construction ----
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// ---- authentication state (exactly one auth mechanism ends up active) ----
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
// ---- mutable state wired up in init() ----
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
/**
 * Convenience constructor: delegates to the key/token-credential constructor
 * (with a null TokenCredential) and then installs the authorization-token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled) {
    this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
        credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Convenience constructor accepting an AAD TokenCredential in addition to the
 * key/resource-token options; installs the authorization-token resolver after delegation.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            TokenCredential tokenCredential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled) {
    this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
        credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Builds the per-resource token map from a permission feed: each permission's
 * resource link is parsed into a resource id/full-name key and its (partition key,
 * token) pair is appended to that key's list. The first resource token of the feed
 * is kept as a fallback.
 *
 * @throws IllegalArgumentException when a resource link cannot be parsed or the
 *         resulting token map is empty
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled) {
    this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs,
        credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
    if (permissionFeed != null && permissionFeed.size() > 0) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length <= 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
            if (partitionKeyAndResourceTokenPairs == null) {
                partitionKeyAndResourceTokenPairs = new ArrayList<>();
                this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
            }
            // A permission without a resource partition key maps to PartitionKeyInternal.Empty.
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if(this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Keep the first real resource token as a fallback for unmatched resources.
        String firstToken = permissionFeed.get(0).getToken();
        if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Core constructor: captures configuration, selects exactly one authentication
 * mechanism (AzureKeyCredential / resource token / master key / AAD TokenCredential),
 * builds the HTTP client, global endpoint manager and retry policy, and registers
 * for CPU/memory monitoring. Network initialization is deferred to init().
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     AzureKeyCredential credential,
                     TokenCredential tokenCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled,
                     boolean contentResponseOnWriteEnabled) {
    activeClientsCnt.incrementAndGet();
    // NOTE(review): getAndDecrement produces client ids 0, -1, -2, ...; likely intended
    // to be getAndIncrement — confirm against the upstream source.
    this.clientId = clientIdGenerator.getAndDecrement();
    this.diagnosticsClientConfig = new DiagnosticsClientConfig();
    this.diagnosticsClientConfig.withClientId(this.clientId);
    this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
    this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
    this.diagnosticsClientConfig.withConsistency(consistencyLevel);
    logger.info(
        "Initializing DocumentClient [{}] with"
            + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
        this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
    try {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        this.configs = configs;
        this.masterKeyOrResourceToken = masterKeyOrResourceToken;
        this.serviceEndpoint = serviceEndpoint;
        this.credential = credential;
        this.tokenCredential = tokenCredential;
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        this.authorizationTokenType = AuthorizationTokenType.Invalid;
        // Exactly one of the following auth branches activates.
        if (this.credential != null) {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.authorizationTokenProvider = null;
            hasAuthKeyResourceToken = true;
            this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
        } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            // Master key: wrap it in an AzureKeyCredential.
            this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = null;
            if (tokenCredential != null) {
                // NOTE(review): the scope string below appears truncated by extraction in this
                // chunk (ends at the scheme separator) — verify against the original source.
                this.tokenCredentialScopes = new String[] {
                    serviceEndpoint.getScheme() + ":
                };
                this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                    .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                this.authorizationTokenType = AuthorizationTokenType.AadToken;
            }
        }
        if (connectionPolicy != null) {
            this.connectionPolicy = connectionPolicy;
        } else {
            this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        }
        this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
        this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
        this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
        // Session capturing is only needed for SESSION consistency unless explicitly overridden.
        boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
        this.consistencyLevel = consistencyLevel;
        this.userAgentContainer = new UserAgentContainer();
        String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
        if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
            userAgentContainer.setSuffix(userAgentSuffix);
        }
        this.reactorHttpClient = httpClient();
        this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
        this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
        this.resetSessionTokenRetryPolicy = retryPolicy;
        CpuMemoryMonitor.register(this);
    } catch (RuntimeException e) {
        // Release anything partially constructed before propagating.
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
/** Returns the diagnostics configuration captured for this client instance. */
@Override
public DiagnosticsClientConfig getConfig() {
    return this.diagnosticsClientConfig;
}
/** Creates a fresh CosmosDiagnostics bound to this client context. */
@Override
public CosmosDiagnostics createDiagnostics() {
    final CosmosDiagnostics diagnostics = BridgeInternal.createCosmosDiagnostics(this);
    return diagnostics;
}
/**
 * Creates the gateway configuration reader and validates that the endpoint
 * manager has already retrieved a database account; derives whether multi-write
 * is usable from the account capabilities.
 *
 * @throws RuntimeException when no database account could be retrieved (bad endpoint or auth)
 */
private void initializeGatewayConfigurationReader() {
    this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
    DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
    if (databaseAccount == null) {
        // Single message value so the logged line and the thrown exception cannot drift apart.
        final String errorMessage = "Client initialization failed."
            + " Check if the endpoint is reachable and if your auth token is valid";
        logger.error(errorMessage);
        throw new RuntimeException(errorMessage);
    }
    this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Second-phase initialization (network-dependent): builds the gateway proxy,
 * initializes endpoint discovery, wires the collection/partition-key-range caches
 * and retry policies, selects gateway vs direct store model, and starts client
 * telemetry. Order matters: the caches depend on the gateway proxy, and direct
 * connectivity depends on the caches.
 */
public void init() {
    try {
        this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
            this.consistencyLevel,
            this.queryCompatibilityMode,
            this.userAgentContainer,
            this.globalEndpointManager,
            this.reactorHttpClient);
        this.globalEndpointManager.init();
        this.initializeGatewayConfigurationReader();
        this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
        this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
        this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
            collectionCache);
        if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
            this.storeModel = this.gatewayProxy;
        } else {
            this.initializeDirectConnectivity();
        }
        clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(),
            ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(),
            connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(),
            null, null, httpClient(), connectionPolicy.isClientTelemetryEnabled());
        clientTelemetry.init();
    } catch (Exception e) {
        // Release partially initialized resources before propagating.
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
/**
 * Sets up direct (TCP) connectivity: a global address resolver, a store client
 * factory, and the server store model. Called from init() only when the
 * connection mode is not GATEWAY.
 */
private void initializeDirectConnectivity() {
    this.addressResolver = new GlobalAddressResolver(this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        null,
        this.connectionPolicy);
    this.storeClientFactory = new StoreClientFactory(
        this.addressResolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled
    );
    this.createStoreModel(true);
}
/**
 * Adapts this client to the DatabaseAccountManagerInternal interface consumed by
 * GlobalEndpointManager; each call delegates straight back to this client.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }
        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
/**
 * Factory for the gateway store model; package-private so tests can substitute
 * a different proxy implementation.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient) {
    return new RxGatewayStoreModel(
        this,
        sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient);
}
/**
 * Builds the reactor HTTP client from the connection policy. When cross-client
 * connection sharing is enabled, a process-wide shared instance is reused;
 * otherwise a dedicated client is created and its config recorded for diagnostics.
 */
private HttpClient httpClient() {
    final HttpClientConfig clientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withRequestTimeout(this.connectionPolicy.getRequestTimeout());
    if (!connectionSharingAcrossClientsEnabled) {
        diagnosticsClientConfig.withGatewayHttpClientConfig(clientConfig);
        return HttpClient.createFixed(clientConfig);
    }
    return SharedGatewayHttpClient.getOrCreateInstance(clientConfig, diagnosticsClientConfig);
}
/**
 * Creates the direct-mode store client and wraps it in a ServerStoreModel.
 * NOTE(review): the {@code subscribeRntbdStatus} parameter is unused in this body
 * (a literal {@code false} is passed to the factory) — confirm intent.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        false
    );
    this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the service endpoint this client was constructed with. */
@Override
public URI getServiceEndpoint() {
    return serviceEndpoint;
}
/** Returns the first available write endpoint, or null when none is known yet. */
@Override
public URI getWriteEndpoint() {
    for (URI endpoint : globalEndpointManager.getWriteEndpoints()) {
        return endpoint;
    }
    return null;
}
/** Returns the first available read endpoint, or null when none is known yet. */
@Override
public URI getReadEndpoint() {
    for (URI endpoint : globalEndpointManager.getReadEndpoints()) {
        return endpoint;
    }
    return null;
}
/** Returns the effective connection policy (explicit or defaulted at construction). */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return connectionPolicy;
}
/** Indicates whether write operations return the resource payload in the response. */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/** Returns the consistency level this client was configured with. */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return this.consistencyLevel;
}
/** Returns the telemetry component; initialized during init(). */
@Override
public ClientTelemetry getClientTelemetry() {
    return clientTelemetry;
}
/**
 * Creates a database, wrapping the internal implementation with a per-request
 * retry policy obtained from the session-token-reset factory.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Serializes the database resource, records serialization timing into the request
 * diagnostics, and issues the create call. Validation failures surface as an error Mono.
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
        // Serialization is timed so it can be reported in the request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a database by link, wrapping the internal implementation with a
 * per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the delete-database request; argument validation failures
 * surface as an error Mono rather than a thrown exception.
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a database by link, wrapping the internal implementation with a
 * per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the read-database request; argument validation failures
 * surface as an error Mono rather than a thrown exception.
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all databases in the account as a paged feed. */
@Override
public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
    return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus child resource type to the feed link used for
 * querying that child collection (e.g. database link + Document -> ".../docs").
 * Database and Offer are account-rooted and ignore the parent link.
 *
 * @throws IllegalArgumentException for resource types without a query feed
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case DocumentCollection:
            return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:
            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case Offer:
            return Paths.OFFERS_ROOT;
        case User:
            return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
        case Permission:
            return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:
            return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:
            return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:
            return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction:
            return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        case Conflict:
            return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
}
/**
 * Entry point for resource queries: resolves the feed link, assigns an activity id,
 * and runs the internal query pipeline under an InvalidPartitionException retry policy
 * (handles collection-recreate scenarios by refreshing the collection cache).
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        options);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId),
        invalidPartitionExceptionRetryPolicy);
}
/**
 * Creates the query execution context and maps its pages into FeedResponses,
 * attaching query-plan diagnostics to the first page and query info to every
 * SELECT VALUE page.
 */
private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal(
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId) {
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery , options, resourceLink, false, activityId);
    // Query-plan diagnostics are only attached to the very first emitted page.
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        // Only pipelined contexts expose QueryInfo; others leave it null.
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) {
            queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }
        QueryInfo finalQueryInfo = queryInfo;
        return iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
    });
}
/**
 * Queries databases using raw query text; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDatabases(querySpec, options);
}
/**
 * Queries databases at the account root using a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    final String queryLink = Paths.DATABASES_ROOT;
    return createQuery(queryLink, querySpec, options, Database.class, ResourceType.Database);
}
/**
 * Creates a collection under the given database, retrying with a
 * session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createCollectionInternal(databaseLink, collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a collection and records the returned session token for session consistency.
 *
 * @param databaseLink        link of the parent database; must be non-empty.
 * @param collection          collection definition; must be non-null with a valid id.
 * @param options             optional request options (headers, throughput, ...).
 * @param retryPolicyInstance retry policy chosen by the caller; may be null.
 * @return a Mono emitting the created collection, or an error on validation failure.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Time the payload serialization so it can be surfaced in request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        // On success, capture the new collection's session token keyed by both rid and alt link.
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a collection definition, retrying with a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces a collection (PUT on its self link) and, when a resource body is returned,
 * records its session token.
 *
 * @param collection          the updated collection definition; must be non-null.
 * @param options             optional request options.
 * @param retryPolicyInstance retry policy chosen by the caller; may be null.
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the payload serialization for request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        // Unlike create, the replace response body may be absent; guard before reading it.
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a collection by link, retrying with a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the DELETE request for a collection.
 *
 * @param collectionLink      link of the collection to delete; must be non-empty.
 * @param options             optional request options.
 * @param retryPolicyInstance retry policy chosen by the caller; may be null.
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Populates headers for a DELETE and dispatches it through the store proxy,
 * updating retry bookkeeping on retried attempts.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.DELETE)
        .flatMap(populated -> {
            final boolean isRetriedAttempt =
                populated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return this.getStoreProxy(populated).processMessage(populated);
        });
}
/**
 * Populates headers for a GET and dispatches it through the store proxy,
 * updating retry bookkeeping on retried attempts.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.GET)
        .flatMap(populated -> {
            final boolean isRetriedAttempt =
                populated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return this.getStoreProxy(populated).processMessage(populated);
        });
}
/**
 * Populates headers for a feed read (GET) and dispatches it via the gateway proxy.
 */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.GET)
        .flatMap(populated -> gatewayProxy.processMessage(populated));
}
/**
 * Populates headers for a query (POST), dispatches it through the store proxy,
 * and captures the session token from the response.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> this.getStoreProxy(populated)
            .processMessage(populated)
            .map(response -> {
                this.captureSessionToken(populated, response);
                return response;
            }));
}
/**
 * Reads a collection by link, retrying with a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the GET request for a single collection.
 *
 * @param collectionLink      link of the collection to read; must be non-empty.
 * @param options             optional request options.
 * @param retryPolicyInstance retry policy chosen by the caller; may be null.
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the collections feed under a database.
 *
 * @throws IllegalArgumentException when {@code databaseLink} is empty.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedLink);
}
/**
 * Queries collections using raw query text; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Queries collections under a database using a parameterized query spec.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(
        databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array literal.
 * JsonSerializable values use the model bridge; everything else goes through Jackson.
 *
 * @throws IllegalArgumentException when a parameter cannot be serialized.
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    final int count = objectArray.size();
    final String[] serialized = new String[count];
    for (int i = 0; i < count; ++i) {
        final Object param = objectArray.get(i);
        if (param instanceof JsonSerializable) {
            serialized[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                serialized[i] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the HTTP headers for a request from client defaults plus per-request options.
 * Client-level settings are applied first, then option-level settings override them.
 *
 * @param options       per-request options; may be null (client defaults only).
 * @param resourceType  target resource type (affects the minimal-content preference).
 * @param operationType operation being performed.
 * @return a freshly allocated, mutable header map.
 * @throws IllegalArgumentException when a fixed offer is combined with autoscale settings.
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    // Client-level consistency; may be overridden below by options.getConsistencyLevel().
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only the client-level minimal-content preference applies.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Custom headers first, so the well-known options below take precedence over them.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request content-response setting overrides the client default when present.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if(options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Explicit offer throughput wins over an offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties-based offers are only considered when no explicit throughput was given.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    return headers;
}
/**
 * Resolves the target collection from the cache, then stamps the partition key
 * onto the request (Document overload).
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request)
        .map(collectionHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
            return request;
        });
}
/**
 * Stamps the partition key onto the request once the caller-supplied collection
 * resolution completes (Object overload).
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
        return request;
    });
}
/**
 * Determines the effective partition key for a request and sets it as both the
 * internal partition key and the x-ms-documentdb-partitionkey header.
 *
 * Precedence (order matters and is behavior):
 *   1. PartitionKey.NONE explicitly requested in options;
 *   2. an explicit partition key in options;
 *   3. collection has no partition key definition -> empty key;
 *   4. extract the key from the document body (object or raw bytes);
 *   otherwise the operation is unsupported without a key.
 *
 * @throws UnsupportedOperationException when no partition key can be determined.
 * @throws IllegalStateException         when neither content nor document is available.
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection is not partitioned.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (contentAsByteBuffer != null) {
            // The buffer may already have been read for serialization; rewind before parsing.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // Time the key extraction so it shows up in serialization diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Extracts the partition key value from a document using the first path of the
 * partition key definition. A missing or object-typed value maps to the "none"
 * partition key. Returns null when no definition/path is available.
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    InternalObjectNode document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition == null) {
        return null;
    }
    // Only the first partition-key path is considered.
    String firstPath = partitionKeyDefinition.getPaths().iterator().next();
    List<String> pathParts = PathParser.getPathParts(firstPath);
    if (pathParts.size() < 1) {
        return null;
    }
    Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, pathParts);
    if (value == null || value.getClass() == ObjectNode.class) {
        value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    }
    return value instanceof PartitionKeyInternal
        ? (PartitionKeyInternal) value
        : PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
}
/**
 * Builds the service request for a document write (Create/Upsert/...), serializing
 * the document, recording serialization diagnostics, and resolving the collection
 * so the partition key can be stamped onto the request.
 *
 * @param requestRetryPolicy           retry policy to notify before send; may be null.
 * @param documentCollectionLink       link of the target collection; must be non-empty.
 * @param document                     the document payload; must be non-null.
 * @param options                      optional request options.
 * @param disableAutomaticIdGeneration currently unused here; id generation is handled upstream
 *                                     -- TODO confirm.
 * @param operationType                the concrete write operation to perform.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time the payload serialization for request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Resolve the collection so the partition key can be extracted and stamped onto the request.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch: wraps the pre-serialized
 * batch body, records serialization diagnostics, resolves the collection, and
 * applies the batch-specific headers.
 *
 * @param requestRetryPolicy           retry policy to notify before send; may be null.
 * @param documentCollectionLink       link of the target collection; must be non-empty.
 * @param serverBatchRequest           the already-serialized batch; must be non-null.
 * @param options                      optional request options.
 * @param disableAutomaticIdGeneration currently unused here -- TODO confirm.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // The batch body is already serialized; only the UTF-8 wrapping is timed here.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // The collection is needed to resolve PartitionKey.NONE into the collection's none-key.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Applies batch routing and batch control headers to a request.
 * Single-partition batches are routed by partition key; partition-key-range
 * batches are routed by range identity.
 *
 * @throws UnsupportedOperationException for unrecognized ServerBatchRequest subtypes.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            // PartitionKey.NONE must be translated using the collection's key definition.
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    // Batch control headers: mark as batch, and propagate atomicity / continue-on-error flags.
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    return request;
}
/**
 * Returns true only for document ReadFeed requests that carry an explicit feed range.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    if (request.getResourceType() != ResourceType.Document) {
        return false;
    }
    return request.getOperationType() == OperationType.ReadFeed
        && request.getFeedRange() != null;
}
/**
 * Adds the AAD authorization header to a service request when this client uses
 * AAD tokens; other token types pass through unchanged.
 *
 * @throws IllegalArgumentException when {@code request} is null.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        // Non-AAD authorization is handled elsewhere; nothing to add here.
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * Adds the AAD authorization header to raw HTTP headers when this client uses
 * AAD tokens; other token types pass through unchanged.
 *
 * @throws IllegalArgumentException when {@code httpHeaders} is null.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/**
 * Returns the authorization token type this client was configured with.
 */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
/**
 * Resolves the authorization token for a request. Credential sources are tried
 * in strict precedence order:
 *   1. a custom token resolver, if configured;
 *   2. a key credential (HMAC signature over the request);
 *   3. a single master key / resource token;
 *   4. the resource-tokens map (with a special case for DatabaseAccount reads,
 *      which use the first token from the permission feed).
 *
 * @param resourceName the resource link/name being addressed.
 * @param resourceType type of the addressed resource.
 * @param requestVerb  HTTP verb, part of the signature payload.
 * @param headers      request headers, part of the signature payload.
 * @param tokenType    requested token type (unused by the resolver paths here).
 * @param properties   optional caller properties passed to a custom resolver.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token is used verbatim for every request.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Converts an internal ResourceType to the public CosmosResourceType,
 * defaulting to SYSTEM when no mapping exists.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token carried in a response's headers so subsequent
// session-consistent requests can replay it.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Populates headers for a create (POST) and dispatches it through the store proxy,
 * updating retry bookkeeping on retried attempts.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> {
            final RxStoreModel proxy = this.getStoreProxy(populated);
            if (populated.requestContext != null && retryPolicy.getRetryCount() > 0) {
                retryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(retryPolicy, true);
            }
            return proxy.processMessage(populated);
        });
}
/**
 * Populates headers for an upsert (POST marked with the is-upsert header),
 * dispatches it, and captures the response session token.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> {
            Map<String, String> headers = populated.getHeaders();
            assert (headers != null);
            // Upserts are POSTs distinguished from creates by this header.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            if (populated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                documentClientRetryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(populated)
                .processMessage(populated)
                .map(response -> {
                    this.captureSessionToken(populated, response);
                    return response;
                });
        });
}
/**
 * Populates headers for a replace (PUT) and dispatches it through the store proxy,
 * updating retry bookkeeping on retried attempts.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.PUT)
        .flatMap(populated -> {
            final boolean isRetriedAttempt =
                populated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(populated).processMessage(populated);
        });
}
/**
 * Populates headers for a PATCH and dispatches it through the store proxy,
 * updating retry bookkeeping on retried attempts.
 *
 * Bug fix: populateHeaders returns a Mono and is asynchronous; the previous code
 * discarded that Mono (never subscribed) and dispatched the un-populated request,
 * unlike every sibling verb method (create/read/delete/replace/upsert) which
 * chains via flatMap. The result is now chained so headers are actually applied
 * before the request is sent.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.PATCH)
        .flatMap(requestPopulated -> {
            if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                documentClientRetryPolicy.updateEndTime();
                requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    // When no partition key is supplied, wrap the policy so a partition-key
    // mismatch triggers collection-cache refresh and retry.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    final boolean partitionKeyUnknown = options == null || options.getPartitionKey() == null;
    if (partitionKeyUnknown) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the Create request for the document and dispatches it, mapping the
 * wire response into a typed ResourceResponse.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Create)
            .flatMap(request -> create(request, requestRetryPolicy))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    // Mirror createDocument: add partition-key-mismatch retry handling when the
    // caller did not supply a partition key.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    final boolean partitionKeyUnknown = options == null || options.getPartitionKey() == null;
    if (partitionKeyUnknown) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the Upsert request for the document and dispatches it, mapping the
 * wire response into a typed ResourceResponse.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Upsert)
            .flatMap(request -> upsert(request, retryPolicyInstance))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
    RequestOptions options) {
    // Without an explicit partition key, derive the collection from the link and
    // wrap the policy so a partition-key mismatch triggers a retry.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        final String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates arguments, converts the raw object into a typed {@link Document},
 * and delegates to the typed replace path.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the throwable itself so the stack trace is logged, matching the
        // sibling create/upsert paths in this class.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    // NOTE(review): the document's self link is passed where a collection link is
    // expected by PartitionKeyMismatchRetryPolicy — presumably the policy resolves
    // the collection from it; confirm against the other replaceDocument overload.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        final String collectionLink = document.getSelfLink();
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Replaces a document addressed by its own self link; delegates to the
 * link-based replace path.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fixed copy/paste defect: this path replaces a document, not a database.
        // Also pass the throwable so the stack trace is logged.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Core replace path shared by both public replaceDocument overloads:
// serializes the document, builds the Replace request, resolves the owning
// collection to attach partition-key information, then issues the PUT.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Time the payload serialization so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
// Give the retry policy a chance to stamp per-attempt state onto the request.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
// Attach the serialization timing recorded above to this request's diagnostics.
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection so the partition-key header can be populated.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
// NOTE(review): the lambda ignores `req` and reuses the outer `request` —
// presumably addPartitionKeyInformation mutates and returns the same instance; confirm.
return requestObs.flatMap(req -> replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options) {
    // Run the patch through the session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, retryPolicy),
        retryPolicy);
}
// Serializes the patch operations, builds the Patch request, resolves the
// owning collection for partition-key information, then issues the PATCH.
private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Time the patch-payload serialization so it can be surfaced in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
// Give the retry policy a chance to stamp per-attempt state onto the request.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection so the partition-key header can be populated. Unlike
// the replace path, no document object is available here, so nulls are passed
// for the content/object arguments.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
// NOTE(review): the lambda ignores `req` and reuses the outer `request` —
// presumably addPartitionKeyInformation mutates the same instance; confirm.
return requestObs.flatMap(req -> patch(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    // Delete without a known payload; delegates to the internal path with a null object node.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, null, options, retryPolicy),
        retryPolicy);
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    // Overload carrying the item's object node so the partition key can be derived from it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, internalObjectNode, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds a Delete request for the document, resolves its collection to attach
 * partition-key information, then dispatches it.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);

        final String path = Utils.joinPath(documentLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Document, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs)
            .flatMap(req -> this.delete(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    // Point read with session-token-reset retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
// Builds a Read request for the document, resolves its collection to attach
// partition-key information, then dispatches it.
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Reading a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Document, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> {
// NOTE(review): onBeforeSendRequest was already called above; this second
// call re-stamps the request after partition-key resolution — confirm the
// duplication is intentional (no sibling method repeats it).
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) {
    // A full read is expressed as an unfiltered query over the collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String selectAll = "SELECT * FROM r";
    return queryDocuments(collectionLink, selectAll, options);
}
// Reads a batch of items identified by (id, partition key) pairs. The items are
// grouped by owning physical partition (via the routing map), one SQL query is
// built per partition, all partitions are queried, and the pages are merged into
// a single synthetic FeedResponse with aggregated request charge and metrics.
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
CosmosQueryRequestOptions options,
Class<T> klass) {
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
// Resolve the collection to obtain its partition key definition and resource id.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
// Look up the routing map so each item can be mapped to its partition range.
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap =
new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
// Bucket each requested identity by the partition range that owns its
// effective partition key.
itemIdentityList
.forEach(itemIdentity -> {
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// Build one SQL query per partition range covering that range's items.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
collection.getPartitionKey());
// Execute all per-partition queries and merge the resulting pages.
return createReadManyQuery(
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
options,
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap))
.collectList()
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
double requestCharge = 0;
// Aggregate charge and query metrics across all pages; deserialize
// each raw Document into the caller's type.
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponse(finalList, headers);
return frp;
});
});
}
);
}
/**
 * For each partition range, builds the SQL spec that fetches that range's
 * items. When the partition key path is exactly "id", a simpler IN-based
 * query is used.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final String pkSelector = createPkSelector(partitionKeyDefinition);
    final boolean pkIsId = pkSelector.equals("[\"id\"]");

    final Map<PartitionKeyRange, SqlQuerySpec> result = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, identities) -> {
        final SqlQuerySpec spec = pkIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector)
            : createReadManyQuerySpec(identities, pkSelector);
        result.put(range, spec);
    });
    return result;
}
/**
 * Builds "SELECT * FROM c WHERE c.id IN ( ... )" for identities whose
 * partition key value equals their id. Identities where the two differ are
 * skipped (they cannot be matched by an id-only IN clause).
 *
 * Fix: the separator is now emitted only between parameters that are actually
 * appended. Previously the trailing ", " depended on the loop index, so a
 * skipped identity at the end of the list produced invalid SQL such as
 * "IN ( @param0, )".
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();
    queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
    boolean firstAppended = true;
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);
        String idValue = itemIdentity.getId();
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        if (!Objects.equals(idValue, pkValue)) {
            // id differs from the partition key value; skip this identity.
            continue;
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        if (!firstAppended) {
            queryStringBuilder.append(", ");
        }
        queryStringBuilder.append(idParamName);
        firstAppended = false;
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds "SELECT * FROM c WHERE ( (c.id = @p AND c[pk] = @p) OR ... )" using
 * two parameters per identity: partition key at index 2*i, id at 2*i + 1.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    final StringBuilder sql = new StringBuilder("SELECT * FROM c WHERE ( ");
    final List<SqlParameter> parameters = new ArrayList<>();

    final int count = itemIdentities.size();
    for (int i = 0; i < count; i++) {
        final CosmosItemIdentity identity = itemIdentities.get(i);
        final String pkParamName = "@param" + (2 * i);
        final String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName,
            ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        parameters.add(new SqlParameter(idParamName, identity.getId()));

        sql.append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
        if (i < count - 1) {
            sql.append(" OR ");
        }
    }
    sql.append(" )");
    return new SqlQuerySpec(sql.toString(), parameters);
}
// Converts the partition key paths (e.g. /pk) into a bracketed selector
// string (e.g. ["pk"]) appended after "c" when building queries.
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getPaths()
.stream()
// Drop the leading '/' from each path segment.
.map(pathPart -> StringUtils.substring(pathPart, 1))
// NOTE(review): this replaces an embedded quote with a lone backslash,
// not an escaped quote (\") — looks like an escaping bug; confirm against
// the intended Cosmos SQL quoting rules before changing.
.map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
.map(part -> "[\"" + part + "\"]")
.collect(Collectors.joining());
}
/**
 * Fans a read-many query out to per-partition execution contexts and merges
 * their page streams into one Flux.
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    final UUID correlationActivityId = Utils.randomUUID();
    final IDocumentQueryClient client = documentQueryClientImpl(RxDocumentClientImpl.this);
    final Flux<? extends IDocumentQueryExecutionContext<T>> contexts =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, client, collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            correlationActivityId,
            klass,
            resourceTypeEnum);
    return contexts.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, options);
}
// Adapter exposing this client's caches, consistency settings and query
// execution to the document query pipeline.
// NOTE(review): the rxDocumentClientImpl parameter is unused — all overrides
// reference RxDocumentClientImpl.this directly; confirm the parameter can go.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
// Account-level default consistency, read from the gateway configuration.
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
// Client-requested consistency override (may be null).
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
// single() asserts the query pipeline emits exactly one response.
return RxDocumentClientImpl.this.query(request).single();
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
// NOTE(review): intentionally unimplemented here — returns null, not an
// empty Mono; callers must not subscribe to this path. Confirm.
return null;
}
};
}
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    // Delegate to the generic query pipeline, typed for Document resources.
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(
    final String collectionLink,
    final CosmosChangeFeedRequestOptions changeFeedOptions) {
    // Change feed is served by a dedicated query implementation.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final ChangeFeedQueryImpl<Document> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this, ResourceType.Document, Document.class, collectionLink, changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
// Reads every document in a single logical partition. Resolves the collection,
// builds a partition-scan query against the partition key selector, maps the
// partition key to its owning range via the routing map, and executes the
// query against just that range, wrapped in an invalid-partition retry policy.
@Override
public Flux<FeedResponse<Document>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
// Resolve the collection for its partition key definition and resource id.
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Query filtered to the single logical partition value.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
// Defensive copy so caller-held options are not mutated below.
final CosmosQueryRequestOptions effectiveOptions =
ModelBridgeInternal.createQueryRequestOptions(options);
// Retries when a partition split/merge invalidates the resolved partition.
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
effectiveOptions);
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
// Look up the routing map to find the range owning this partition key.
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
// Pin the query to the owning range and run it through the
// generic query pipeline.
return createQueryInternal(
resourceLink,
querySpec,
ModelBridgeInternal.partitionKeyRangeIdInternal(effectiveOptions, range.getId()),
Document.class,
ResourceType.Document,
queryClient,
activityId);
});
},
invalidPartitionExceptionRetryPolicy);
});
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    CosmosQueryRequestOptions options) {
    // Reads the partition key range feed nested under the collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Validates inputs and builds a stored-procedure request under the given
 * collection for the requested operation type.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);

    final String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
/**
 * Validates inputs and builds a user-defined-function request under the given
 * collection for the requested operation type.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);

    final String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, path, udf, headers, options);
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    // Create with session-token-reset retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches a stored-procedure Create request, mapping the wire
 * response into a typed ResourceResponse.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());

        final RxDocumentServiceRequest request =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    // Upsert with session-token-reset retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches a stored-procedure Upsert request, mapping the wire
 * response into a typed ResourceResponse.
 */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());

        final RxDocumentServiceRequest request =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    // Replace with session-token-reset retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces a stored procedure addressed by its self link, mapping the wire
 * response into a typed ResourceResponse.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);

        final String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        final Map<String, String> headers =
            getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class,
Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
CosmosQueryRequestOptions options) {
return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
List<Object> procedureParams) {
return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy);
}
@Override
public Mono<TransactionalBatchResponse> executeBatchRequest(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy);
}
    /**
     * Builds and dispatches an ExecuteJavaScript request for a stored procedure.
     * The serialized parameter list becomes the request body; the response is
     * mapped to a {@link StoredProcedureResponse} after the session token is captured.
     */
    private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
        RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
        try {
            logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
            String path = Utils.joinPath(storedProcedureLink, null);
            // Sproc execution results are JSON; force the Accept header accordingly.
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
            requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
            // Body is the serialized parameter array, or the empty string when no params are given.
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ExecuteJavaScript,
                ResourceType.StoredProcedure, path,
                procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
                requestHeaders, options);
            if (retryPolicy != null) {
                retryPolicy.onBeforeSendRequest(request);
            }
            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            // NOTE(review): the lambda ignores 'req' and reuses the outer 'request'; this is only
            // equivalent if addPartitionKeyInformation returns the same (mutated) instance — confirm.
            return reqObs.flatMap(req -> create(request, retryPolicy)
                .map(response -> {
                    // Record the service's session token so later session-consistent reads see this write.
                    this.captureSessionToken(request, response);
                    return toStoredProcedureResponse(response);
                }));
        } catch (Exception e) {
            logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Dispatches a transactional batch request and parses the multi-operation response.
     * Synchronous failures while building the request are surfaced as an error Mono.
     */
    private Mono<TransactionalBatchResponse> executeBatchRequestInternal(String collectionLink,
        ServerBatchRequest serverBatchRequest,
        RequestOptions options,
        DocumentClientRetryPolicy requestRetryPolicy,
        boolean disableAutomaticIdGeneration) {
        try {
            logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
            Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
            Mono<RxDocumentServiceResponse> responseObservable =
                requestObs.flatMap(request -> create(request, requestRetryPolicy));
            // 'true' asks the parser to ensure the response covers every submitted operation.
            return responseObservable
                .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
        } catch (Exception ex) {
            logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
            return Mono.error(ex);
        }
    }
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
trigger, requestHeaders, options);
return request;
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.Trigger, Trigger.class,
Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
CosmosQueryRequestOptions options) {
return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
String query, CosmosQueryRequestOptions options) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Reads a conflict resource. Unlike most reads, partition key information must be
     * resolved asynchronously first, so the retry hook and dispatch happen inside flatMap.
     */
    private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(conflictLink)) {
                throw new IllegalArgumentException("conflictLink");
            }
            logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
            String path = Utils.joinPath(conflictLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
            // Resolve partition-key routing info before the request can be sent.
            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            return reqObs.flatMap(req -> {
                // NOTE(review): the lambda ignores 'req' and reuses the outer 'request'; this is
                // only equivalent if addPartitionKeyInformation returns the same (mutated) instance — confirm.
                if (retryPolicyInstance != null) {
                    retryPolicyInstance.onBeforeSendRequest(request);
                }
                return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
            });
        } catch (Exception e) {
            logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.Conflict, Conflict.class,
Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
CosmosQueryRequestOptions options) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Deletes a conflict resource. Partition key information must be resolved
     * asynchronously first, so the retry hook and dispatch happen inside flatMap.
     */
    private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(conflictLink)) {
                throw new IllegalArgumentException("conflictLink");
            }
            logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
            String path = Utils.joinPath(conflictLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
            // Resolve partition-key routing info before the request can be sent.
            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            return reqObs.flatMap(req -> {
                // NOTE(review): the lambda ignores 'req' and reuses the outer 'request'; this is
                // only equivalent if addPartitionKeyInformation returns the same (mutated) instance — confirm.
                if (retryPolicyInstance != null) {
                    retryPolicyInstance.onBeforeSendRequest(request);
                }
                return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
            });
        } catch (Exception e) {
            logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (user == null) {
throw new IllegalArgumentException("user");
}
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.User, path, user, requestHeaders, options);
return request;
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the User read request; see {@link #deleteUserInternal} for the
 * shared request/retry-policy pattern used by all single-resource operations here.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
// Let the retry policy observe the request before it is dispatched.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
// Convert synchronous failures into a failed Mono instead of throwing.
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads all users of the given database as a paged feed.
 *
 * @param databaseLink link of the database whose users are read
 * @param options      query options, may be null
 * @throws IllegalArgumentException if {@code databaseLink} is empty
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.User, User.class, feedLink);
}
/**
 * Queries users using a raw SQL string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, options);
}
/**
 * Queries users of the database identified by {@code databaseLink} with a parameterized query.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/**
 * Creates a permission under the given user.
 *
 * FIX: previously the lambda captured one retry-policy instance while a second,
 * unrelated instance was created for the retry driver
 * ({@code this.resetSessionTokenRetryPolicy.getRequestPolicy()} was called twice).
 * Every sibling operation reuses a single instance so the policy that decides on
 * retries is the same one the operation runs under; this method now does the same.
 *
 * @param userLink   link of the owning user
 * @param permission permission to create
 * @param options    request options, may be null
 * @return a {@link Mono} emitting the created permission's resource response
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Builds and issues the Permission create request.
 * NOTE(review): unlike the other *Internal methods, this one does not call
 * retryPolicy.onBeforeSendRequest(request) before dispatch — confirm whether
 * that is intentional.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Convert synchronous failures into a failed Mono instead of throwing.
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a permission under the given user.
 *
 * @param userLink   link of the owning user
 * @param permission permission to upsert
 * @param options    request options, may be null
 */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Permission upsert request, priming the retry policy with
 * the request before dispatch.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
// Let the retry policy observe the request before it is dispatched.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Convert synchronous failures into a failed Mono instead of throwing.
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds a service request targeting the permissions feed of the given user.
 *
 * @param userLink      link of the owning user
 * @param permission    permission payload (validated before use)
 * @param options       request options, may be null
 * @param operationType the operation the request represents
 * @throws IllegalArgumentException if {@code userLink} is empty or {@code permission} is null
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    final String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, path, permission, headers, options);
}
/**
 * Replaces an existing permission (addressed by its self link) with the given payload.
 */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Permission replace request. The target path is derived
 * from the permission's self link.
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
// Let the retry policy observe the request before it is dispatched.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Convert synchronous failures into a failed Mono instead of throwing.
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the permission addressed by {@code permissionLink}.
 */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Permission delete request.
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
// Let the retry policy observe the request before it is dispatched.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Convert synchronous failures into a failed Mono instead of throwing.
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the permission addressed by {@code permissionLink}.
 */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Permission read request.
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
// Let the retry policy observe the request before it is dispatched.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Convert synchronous failures into a failed Mono instead of throwing.
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads all permissions of the given user as a paged feed.
 *
 * @throws IllegalArgumentException if {@code userLink} is empty
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    final String feedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Permission, Permission.class, feedLink);
}
/**
 * Queries permissions of a user using a raw SQL string; delegates to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
CosmosQueryRequestOptions options) {
return queryPermissions(userLink, new SqlQuerySpec(query), options);
}
/**
 * Queries permissions of the user identified by {@code userLink} with a parameterized query.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
/**
 * Replaces an offer (throughput resource) addressed by its self link.
 */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Offer replace request. Offers carry no per-request headers
 * or options here (both are passed as null).
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
// Convert synchronous failures into a failed Mono instead of throwing.
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the offer addressed by {@code offerLink}.
 */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Offer read request.
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
// Let the retry policy observe the request before it is dispatched.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
// Convert synchronous failures into a failed Mono instead of throwing.
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads all offers of the account as a paged feed.
 */
@Override
public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
return readFeed(options, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/**
 * Generic paged read over a feed resource (users, permissions, offers, ...).
 * Builds one ReadFeed request per page — threading the continuation token and
 * page size through request headers — and lets {@link Paginator} drive paging.
 *
 * @param options      query options; a default instance is substituted when null
 * @param resourceType resource type of the feed
 * @param klass        target class used to deserialize each page
 * @param resourceLink link of the feed to read
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
if (options == null) {
options = new CosmosQueryRequestOptions();
}
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
// -1 leaves the page size to the service default.
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
return request;
};
// Each page execution runs under a fresh retry-policy instance.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
this.resetSessionTokenRetryPolicy.getRequestPolicy());
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/**
 * Queries offers using a raw SQL string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
return queryOffers(new SqlQuerySpec(query), options);
}
/**
 * Queries offers with a parameterized query. Offers are account-scoped, so no
 * parent resource link is supplied.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/**
 * Retrieves the database account metadata from the service.
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
/**
 * Returns the most recently cached database account from the global endpoint
 * manager (no service round trip).
 */
@Override
public DatabaseAccount getLatestDatabaseAccount() {
return this.globalEndpointManager.getLatestDatabaseAccount();
}
/**
 * Builds and issues the DatabaseAccount read request (empty path targets the
 * account root) and converts the raw response into a {@link DatabaseAccount}.
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
} catch (Exception e) {
// Convert synchronous failures into a failed Mono instead of throwing.
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Returns the session container tracking per-partition session tokens.
 * Typed as Object to match the interface contract.
 */
public Object getSession() {
return this.sessionContainer;
}
/**
 * Replaces the session container. The argument must be a {@link SessionContainer};
 * any other type fails with a ClassCastException.
 */
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
/**
 * Returns the client-side collection metadata cache.
 */
@Override
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
/**
 * Returns the client-side partition key range cache.
 */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
/**
 * Fetches the database account from a specific regional endpoint (used by the
 * global endpoint manager during endpoint discovery). Updates the
 * multiple-write-locations flag from the returned account. Request creation is
 * deferred so each subscription builds a fresh request.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeaders(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
// Route this probe to the explicitly requested regional endpoint.
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount ->
// Multi-write is effective only if both the client policy and the account enable it.
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Certain requests must be routed through gateway even when the client connectivity
 * mode is direct. Resolves which store model (gateway or direct) should execute the
 * given request.
 *
 * @param request the service request being dispatched
 * @return the {@link RxStoreModel} that should process the request
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit gateway pin on the request wins outright.
    if (request.UseGatewayMode) {
        return this.gatewayProxy;
    }

    final ResourceType resourceType = request.getResourceType();
    final OperationType operationType = request.getOperationType();

    // Offers, partition-key-range reads, and script CRUD (anything but execution)
    // are gateway-only regardless of the operation.
    if (resourceType == ResourceType.Offer
            || resourceType == ResourceType.PartitionKeyRange
            || (resourceType.isScript() && operationType != OperationType.ExecuteJavaScript)) {
        return this.gatewayProxy;
    }

    final boolean useGateway;
    if (operationType == OperationType.Create || operationType == OperationType.Upsert) {
        useGateway = resourceType == ResourceType.Database
            || resourceType == ResourceType.User
            || resourceType == ResourceType.DocumentCollection
            || resourceType == ResourceType.Permission;
    } else if (operationType == OperationType.Delete) {
        useGateway = resourceType == ResourceType.Database
            || resourceType == ResourceType.User
            || resourceType == ResourceType.DocumentCollection;
    } else if (operationType == OperationType.Replace || operationType == OperationType.Read) {
        useGateway = resourceType == ResourceType.DocumentCollection;
    } else {
        // Cross-partition queries without a target partition key (range) must fan out via gateway.
        useGateway = (operationType == OperationType.Query || operationType == OperationType.SqlQuery)
            && Utils.isCollectionChild(resourceType)
            && request.getPartitionKeyRangeIdentity() == null
            && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null;
    }
    return useGateway ? this.gatewayProxy : this.storeModel;
}
/**
 * Shuts the client down exactly once: releases the endpoint manager, store client
 * factory and HTTP client, and deregisters from the CPU/memory monitor. A second
 * call only logs a warning (guarded by the {@code closed} flag).
 */
@Override
public void close() {
logger.info("Attempting to close client {}", this.clientId);
if (!closed.getAndSet(true)) {
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
LifeCycleUtils.closeQuietly(this.reactorHttpClient);
logger.info("Shutting down CpuMonitor ...");
CpuMemoryMonitor.unregister(this);
logger.info("Shutting down completed.");
} else {
logger.warn("Already shutdown!");
}
}
/**
 * Returns the deserializer used to materialize items from raw payloads.
 */
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
/**
 * Builds a parameterized query that scans a single logical partition:
 * {@code SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue}.
 *
 * @param partitionKey         the partition key whose value is bound to {@code @pkValue}
 * @param partitionKeySelector property-path selector appended to the document alias {@code c}
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    final String pkParamName = "@pkValue";
    final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    final List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    final String queryText =
        "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Resolves the feed ranges (one per physical partition) of the given container by
 * resolving the collection, then listing all overlapping partition key ranges over
 * the full effective-partition-key span.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
// Force-refresh (true) the overlapping ranges for the whole PK span.
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
return valueHolderMono.map(RxDocumentClientImpl::toFeedRanges);
});
}
/**
 * Maps a resolved list of partition key ranges to their {@link FeedRange} form.
 *
 * @throws IllegalStateException if the resolved list is null
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder) {
    final List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        throw new IllegalStateException("PartitionKeyRange list cannot be null");
    }
    final List<FeedRange> feedRanges = new ArrayList<>(ranges.size());
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
/**
 * Wraps a partition key range id in its {@link FeedRange} representation.
 */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangePartitionKeyRangeImpl(pkRange.getId());
}
}
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// ---- static state shared by all client instances ----
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Full effective-partition-key span: [min, max) over all partition key ranges.
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// ---- immutable per-client configuration ----
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// ---- authentication state (one of key credential, resource token, or AAD token) ----
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
// ---- mutable runtime state, wired during init() ----
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private ConcurrentMap<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
/**
 * Convenience constructor without an AAD token credential: delegates to the
 * token-credential overload with a null {@code tokenCredential}, then installs
 * the custom authorization token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Constructor accepting every supported credential kind; delegates to the private
 * permission-feed constructor and then installs the custom authorization token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Delegating constructor that additionally builds the resource-token map from a
 * permission feed: each permission's resource link is parsed and its (partition key,
 * token) pair is indexed by resource id/full name. The first resource token found in
 * the feed is remembered as a fallback.
 *
 * @throws IllegalArgumentException if a permission has an unparseable/empty resource
 *         link, or if the feed yields an empty token map
 */
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs,
credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
// Group tokens per resource; several permissions may target the same resource
// with different partition keys.
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
// Remember the first resource token as the default/fallback token.
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
/**
 * Root constructor — every other constructor funnels here. Wires authentication
 * (key credential, master key, resource token, or AAD token credential), connection
 * policy, session container, global endpoint manager and the client-level retry
 * policy. On any initialization failure the client is closed before rethrowing.
 *
 * Fixes in this revision:
 *  - the AAD scope string literal was truncated/unparseable; restored to
 *    "scheme://host/.default" per the AAD v2 default-scope convention.
 *  - client ids were generated with getAndDecrement() (yielding 0, -1, -2, ...);
 *    switched to getAndIncrement() so ids grow monotonically.
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     AzureKeyCredential credential,
                     TokenCredential tokenCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled,
                     boolean contentResponseOnWriteEnabled) {
    activeClientsCnt.incrementAndGet();
    this.clientId = clientIdGenerator.getAndIncrement();
    this.diagnosticsClientConfig = new DiagnosticsClientConfig();
    this.diagnosticsClientConfig.withClientId(this.clientId);
    this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
    this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
    this.diagnosticsClientConfig.withConsistency(consistencyLevel);
    logger.info(
        "Initializing DocumentClient [{}] with"
            + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
        this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
    try {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        this.configs = configs;
        this.masterKeyOrResourceToken = masterKeyOrResourceToken;
        this.serviceEndpoint = serviceEndpoint;
        this.credential = credential;
        this.tokenCredential = tokenCredential;
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        this.authorizationTokenType = AuthorizationTokenType.Invalid;
        // Resolve the authentication mode; exactly one branch applies.
        if (this.credential != null) {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.authorizationTokenProvider = null;
            hasAuthKeyResourceToken = true;
            this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
        } else if (masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            // Plain master key: wrap in an AzureKeyCredential so both paths share one provider.
            this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = null;
            if (tokenCredential != null) {
                // AAD v2 default scope for the account endpoint: "<scheme>://<host>/.default".
                this.tokenCredentialScopes = new String[] {
                    serviceEndpoint.getScheme() + "://" + serviceEndpoint.getHost() + "/.default"
                };
                this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                    .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                this.authorizationTokenType = AuthorizationTokenType.AadToken;
            }
        }
        if (connectionPolicy != null) {
            this.connectionPolicy = connectionPolicy;
        } else {
            this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        }
        this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
        this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
        this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
        // Session capturing is enabled for SESSION consistency or when explicitly overridden.
        boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
        this.consistencyLevel = consistencyLevel;
        this.userAgentContainer = new UserAgentContainer();
        String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
        if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
            userAgentContainer.setSuffix(userAgentSuffix);
        }
        this.reactorHttpClient = httpClient();
        this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, configs);
        this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
        this.resetSessionTokenRetryPolicy = retryPolicy;
        CpuMemoryMonitor.register(this);
        this.queryPlanCache = new ConcurrentHashMap<>();
    } catch (RuntimeException e) {
        // Release partially acquired resources before surfacing the failure.
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
/**
 * Returns the diagnostics configuration captured for this client.
 */
@Override
public DiagnosticsClientConfig getConfig() {
return diagnosticsClientConfig;
}
/**
 * Creates a fresh diagnostics object bound to this client context.
 */
@Override
public CosmosDiagnostics createDiagnostics() {
return BridgeInternal.createCosmosDiagnostics(this);
}
/**
 * Creates the gateway service-configuration reader and validates that the
 * database account has been resolved by the endpoint manager. Also derives
 * whether multi-region writes are usable (requires both the client-side
 * opt-in on the connection policy and account-level support).
 *
 * @throws RuntimeException if the account could not be fetched (unreachable
 *         endpoint or invalid credentials)
 */
private void initializeGatewayConfigurationReader() {
    this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
    DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
    if (databaseAccount == null) {
        // Single message constant so the log line and the thrown exception can never drift apart.
        final String message = "Client initialization failed."
            + " Check if the endpoint is reachable and if your auth token is valid";
        logger.error(message);
        throw new RuntimeException(message);
    }
    // Multi-region writes need both the client opt-in and account support.
    this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
        && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
// Completes client construction: builds the gateway transport used for
// metadata operations, initializes endpoint discovery, wires the collection
// and partition-key-range caches, selects the store model based on the
// connection mode, and starts client telemetry. On any failure the client is
// closed before the exception is rethrown.
public void init() {
try {
// The gateway proxy serves metadata requests in both GATEWAY and DIRECT modes.
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient);
// Endpoint manager must run before the account is read in the next call.
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
// Wrap the base retry policy so stale session tokens can be reset via the collection cache.
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
// DIRECT mode: build address resolver + TCP store client (see initializeDirectConnectivity).
this.initializeDirectConnectivity();
}
clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(),
ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(),
connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(),
null, null, httpClient(), connectionPolicy.isClientTelemetryEnabled());
clientTelemetry.init();
this.queryPlanCache = new ConcurrentHashMap<>();
} catch (Exception e) {
// close() releases anything that was partially initialized before rethrowing.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
// Sets up DIRECT-mode connectivity: a global address resolver (maps partitions
// to replica addresses across regions) and the store-client factory used to
// build the server store model.
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled
);
this.createStoreModel(true);
}
// Adapts this client to the DatabaseAccountManagerInternal interface consumed
// by the GlobalEndpointManager; each callback simply delegates back to the
// enclosing client instance.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
// Factory for the gateway store model; package-private so tests can override
// it to inject a different transport. Pure construction, no side effects.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient);
}
// Builds the reactor-netty HTTP client from the connection policy. When
// connection sharing across clients is enabled, a process-wide shared instance
// is returned instead of a dedicated one (and the shared instance manages its
// own diagnostics registration).
private HttpClient httpClient() {
HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
.withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
.withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
.withProxy(this.connectionPolicy.getProxy())
.withRequestTimeout(this.connectionPolicy.getRequestTimeout());
if (connectionSharingAcrossClientsEnabled) {
return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
} else {
diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig);
return HttpClient.createFixed(httpClientConfig);
}
}
// Builds the DIRECT-mode server store model from the store-client factory.
// NOTE(review): the subscribeRntbdStatus parameter is not used anywhere in
// this body (the factory is called with a literal false); consider removing
// it together with its call site, or wiring it through.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
false
);
this.storeModel = new ServerStoreModel(storeClient);
}
/**
 * Returns the service endpoint this client was created against.
 */
@Override
public URI getServiceEndpoint() {
    return serviceEndpoint;
}
/**
 * Returns the currently preferred write endpoint, or {@code null} when the
 * endpoint manager has none.
 */
@Override
public URI getWriteEndpoint() {
    return globalEndpointManager.getWriteEndpoints()
        .stream()
        .findFirst()
        .orElse(null);
}
/**
 * Returns the currently preferred read endpoint, or {@code null} when the
 * endpoint manager has none.
 */
@Override
public URI getReadEndpoint() {
    return globalEndpointManager.getReadEndpoints()
        .stream()
        .findFirst()
        .orElse(null);
}
/**
 * Returns the effective connection policy of this client.
 */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return connectionPolicy;
}
/**
 * Whether write operations return the full resource payload by default.
 */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/**
 * Returns the default consistency level configured for this client.
 */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return this.consistencyLevel;
}
/**
 * Returns the telemetry component created during {@code init()}.
 */
@Override
public ClientTelemetry getClientTelemetry() {
    return clientTelemetry;
}
/**
 * Creates a database, running the internal call under the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the create-database request. Serialization is timed and
// the resulting diagnostics are attached to the request's diagnostics context.
// Argument errors surface as Mono.error rather than synchronous throws.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Time the JSON serialization so it shows up in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
// Give the retry policy a chance to stamp per-attempt state on the request.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a database, running the internal call under the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the delete-database request. Argument errors surface as
// Mono.error rather than synchronous throws.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
// Give the retry policy a chance to stamp per-attempt state on the request.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads a database, running the internal call under the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the read-database request. Argument errors surface as
// Mono.error rather than synchronous throws.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
// Give the retry policy a chance to stamp per-attempt state on the request.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads all databases as a paged feed.
 */
@Override
public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
    return readFeed(
        options,
        ResourceType.Database,
        Database.class,
        Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link to the feed/query link for the given resource
 * type. Root-scoped types (databases, offers) ignore the parent link.
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        // root-scoped feeds: the parent link is irrelevant
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        // child feeds: addressed relative to the parent resource
        case DocumentCollection:
            return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:
            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case User:
            return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
        case Permission:
            return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:
            return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:
            return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:
            return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction:
            return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        case Conflict:
            return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
}
/**
 * Builds a query {@link Flux} over the feed identified by the parent link and
 * resource type, retrying the whole query on invalid-partition errors.
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum) {

    final String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    final UUID activityId = Utils.randomUUID();
    final IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
    final InvalidPartitionExceptionRetryPolicy invalidPartitionRetryPolicy =
        new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            queryResourceLink,
            options);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> createQueryInternal(
            queryResourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId),
        invalidPartitionRetryPolicy);
}
// Core query pipeline: resolves an execution context for the query (possibly
// using the cached query plan) and streams its pages, enriching the first
// response with query-plan diagnostics and, when the query projects a VALUE,
// the query info needed for client-side extraction.
private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal(
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache);
// Tracks whether the current page is the first one emitted, so plan
// diagnostics are attached exactly once.
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
// Only pipelined (cross-partition) contexts expose query info.
if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) {
queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
return iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
});
}
/**
 * Queries databases with a raw SQL string; delegates to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDatabases(querySpec, options);
}
/**
 * Queries databases with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(
        Paths.DATABASES_ROOT,
        querySpec,
        options,
        Database.class,
        ResourceType.Database);
}
/**
 * Creates a collection in the given database, running the internal call under
 * the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the create-collection request, records serialization
// diagnostics, and on success stores the returned session token so subsequent
// session-consistent reads see the new collection.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
// Time the JSON serialization so it shows up in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Capture the session token for the newly created collection.
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces a collection definition, running the internal call under the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the replace-collection request, records serialization
// diagnostics, and on success refreshes the stored session token (guarded by a
// null check since a replace response may carry no resource body).
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
// Time the JSON serialization so it shows up in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a collection, running the internal call under the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the delete-collection request. Argument errors surface as
// Mono.error rather than synchronous throws.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
// Give the retry policy a chance to stamp per-attempt state on the request.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Sends a DELETE through the appropriate store proxy after header population,
 * updating retry bookkeeping on retried attempts.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.DELETE)
        .flatMap(populated -> {
            // Only retried attempts (count > 0) carry retry-context updates.
            if (populated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                documentClientRetryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(populated).processMessage(populated);
        });
}
/**
 * Sends a GET through the appropriate store proxy after header population,
 * updating retry bookkeeping on retried attempts.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.GET)
        .flatMap(populated -> {
            // Only retried attempts (count > 0) carry retry-context updates.
            if (populated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                documentClientRetryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(populated).processMessage(populated);
        });
}
/**
 * Sends a feed-read GET; feed reads always go through the gateway proxy.
 */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.GET)
        .flatMap(populated -> gatewayProxy.processMessage(populated));
}
/**
 * Sends a query POST through the appropriate store proxy and captures the
 * response's session token before handing the response back.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> getStoreProxy(populated)
            .processMessage(populated)
            .map(response -> {
                // Record the session token so session consistency holds for follow-ups.
                this.captureSessionToken(populated, response);
                return response;
            }));
}
/**
 * Reads a collection, running the internal call under the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the read-collection request. Argument errors surface as
// Mono.error rather than synchronous throws.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
// Give the retry policy a chance to stamp per-attempt state on the request.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads all collections under the given database as a paged feed.
 *
 * @throws IllegalArgumentException if {@code databaseLink} is empty
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedLink);
}
/**
 * Queries collections with a raw SQL string.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Queries collections with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(
        databaseLink,
        querySpec,
        options,
        DocumentCollection.class,
        ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure arguments into a single JSON array literal,
 * e.g. {@code [1,"a",{"k":"v"}]}. SDK model types serialize through the
 * bridge; everything else goes through the shared Jackson mapper.
 *
 * @throws IllegalArgumentException if an argument cannot be serialized
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    StringBuilder json = new StringBuilder("[");
    for (int i = 0; i < objectArray.size(); ++i) {
        if (i > 0) {
            json.append(",");
        }
        Object object = objectArray.get(i);
        if (object instanceof JsonSerializable) {
            json.append(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object));
        } else {
            try {
                json.append(mapper.writeValueAsString(object));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return json.append("]").toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Assembles the HTTP headers for a request from client defaults and the
// per-request options. Ordering matters: client-level values are written
// first, then option-level values overwrite them (custom headers, consistency
// level, content-response preference). Throughput/autoscale headers are
// mutually exclusive and validated here.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
Map<String, String> headers = new HashMap<>();
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
// Client-default consistency; may be overwritten by options below.
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
if (options == null) {
// No per-request options: only the client-level content-response preference applies.
if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
return headers;
}
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
// Per-request flag overrides the client default when explicitly set.
boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
if (options.isContentResponseOnWriteEnabled() != null) {
contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
}
if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
if (options.getIfMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
}
if(options.getIfNoneMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
}
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Explicit offer throughput wins over a named offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
// ThroughputProperties path: fixed vs. autoscale are mutually exclusive.
if (options.getOfferThroughput() == null) {
if (options.getThroughputProperties() != null) {
Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
if (offerAutoscaleSettings != null) {
autoscaleAutoUpgradeProperties
= offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
}
if (offer.hasOfferThroughput() &&
(offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
autoscaleAutoUpgradeProperties != null &&
autoscaleAutoUpgradeProperties
.getAutoscaleThroughputProperties()
.getIncrementPercent() >= 0)) {
throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
+ "fixed offer");
}
if (offer.hasOfferThroughput()) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
} else if (offer.getOfferAutoScaleSettings() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
}
}
}
if (options.isQuotaInfoEnabled()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
return headers;
}
/**
 * Resolves the target collection for the request, then stamps the partition
 * key information onto it; completes with the same (mutated) request.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
/**
 * Overload taking an already-resolved collection observable; stamps the
 * partition key onto the request once the collection is available.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
// Determines the effective partition key for a request and writes it both
// onto the request object and into the x-ms-documentdb-partitionkey header.
// Resolution precedence: explicit PartitionKey.NONE in options > explicit
// partition key in options > empty key for non-partitioned collections >
// extraction from the document body. Anything else is unsupported.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Collection has no partition-key definition.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (contentAsByteBuffer != null) {
// Rewind: the buffer was consumed by an earlier serialization pass.
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
// Extraction is timed and attached to the request diagnostics.
Instant serializationStartTime = Instant.now();
partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Extracts the partition key value for {@code document} using the first path of the
 * partition key definition. A missing value (or an ObjectNode placeholder) is mapped
 * to the "none" partition key. Returns {@code null} when no definition/path exists.
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    InternalObjectNode document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition == null) {
        return null;
    }
    String firstPath = partitionKeyDefinition.getPaths().iterator().next();
    List<String> pathParts = PathParser.getPathParts(firstPath);
    if (pathParts.isEmpty()) {
        return null;
    }
    Object extracted = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, pathParts);
    if (extracted == null || extracted.getClass() == ObjectNode.class) {
        // Absent or object-valued partition key fields resolve to the "none" key.
        extracted = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    }
    return (extracted instanceof PartitionKeyInternal)
        ? (PartitionKeyInternal) extracted
        : PartitionKeyInternal.fromObjectArray(Collections.singletonList(extracted), false);
}
// Builds the service request for a document create/upsert: validates arguments,
// serializes the payload (recording serialization diagnostics), constructs the
// request, then resolves the collection so partition-key info can be attached.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Time the payload serialization so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.Document, path, requestHeaders, options, content);
// Give the retry policy a chance to observe/annotate the request before dispatch.
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
// Diagnostics context only exists once the request has been created above.
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolving the collection is async; partition-key information is added once it completes.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
// Builds the service request for a transactional batch: wraps the pre-serialized
// batch body, records serialization diagnostics, then resolves the collection so
// batch routing headers can be attached.
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
// The batch body was already serialized to a string; only UTF-8 encoding is timed here.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Batch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
// Give the retry policy a chance to observe/annotate the request before dispatch.
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Batch headers depend on the resolved collection (for the partition key definition).
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
return request;
});
}
// Attaches batch routing + batch control headers to the request. Routing is either
// by partition key value (single-partition batch) or by partition key range id.
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
// PartitionKey.NONE is translated using the collection's partition key definition.
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
// Range-scoped batches route directly to a partition key range id.
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
// Common batch control headers: marks the request as a batch and carries the
// atomicity / continue-on-error semantics chosen by the caller.
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
return request;
}
/**
 * Returns whether this request needs feed-range based filtering: only document
 * ReadFeed requests that carry an explicit feed range do.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    if (request.getResourceType() != ResourceType.Document) {
        return false;
    }
    return request.getOperationType() == OperationType.ReadFeed
        && request.getFeedRange() != null;
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    // Only AAD-token auth needs an async token acquisition here; other auth schemes
    // attach their authorization elsewhere, so the request passes through untouched.
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    // Mirror of the request-based overload, operating on raw HTTP headers.
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
@Override
// Returns the authorization scheme (e.g. master key, resource token, AAD token)
// this client instance was configured with.
public AuthorizationTokenType getAuthorizationTokenType() {
return this.authorizationTokenType;
}
@Override
// Resolves the authorization token for a request. Resolution order matters:
// 1) a user-supplied token resolver, 2) a key credential, 3) a single resource
// token used as the auth key, 4) the per-resource token map.
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
// Custom resolver gets an unmodifiable view of the caller-supplied properties.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token was supplied in place of a key; use it directly.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Account-level reads use the first token observed in the permission feed.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
/**
 * Maps an internal ResourceType to its public CosmosResourceType; falls back to
 * SYSTEM when the serialized name has no public mapping.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return (mapped != null) ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token from a response into the session container so
// subsequent requests can honor session consistency.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Populates POST headers, records retry bookkeeping on re-attempts, and dispatches
 * the request through the store model.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> {
            RxStoreModel storeModel = this.getStoreProxy(populated);
            if (populated.requestContext != null && retryPolicy.getRetryCount() > 0) {
                // Not the first attempt: capture retry timing/context for diagnostics.
                retryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(retryPolicy, true);
            }
            return storeModel.processMessage(populated);
        });
}
/**
 * Like {@code create} but marks the POST as an upsert and captures the session
 * token from the response.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> {
            Map<String, String> headers = populated.getHeaders();
            assert (headers != null);
            // The upsert header distinguishes this POST from a plain create.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            if (populated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                // Not the first attempt: capture retry timing/context for diagnostics.
                documentClientRetryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(populated)
                .processMessage(populated)
                .map(response -> {
                    // Upserts advance session state; record the new session token.
                    this.captureSessionToken(populated, response);
                    return response;
                });
        });
}
/**
 * Populates PUT headers, records retry bookkeeping on re-attempts, and dispatches
 * the replace through the store model.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.PUT)
        .flatMap(populated -> {
            if (populated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                // Not the first attempt: capture retry timing/context for diagnostics.
                documentClientRetryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(populated).processMessage(populated);
        });
}
/**
 * Populates PATCH headers, records retry bookkeeping on re-attempts, and dispatches
 * the request through the store model.
 *
 * <p>Bug fix: the previous implementation invoked {@code populateHeaders(...)} but
 * discarded the returned {@link Mono}. Since Monos are lazy, the header-population
 * step never executed and the request was dispatched without populated headers.
 * The pipeline now chains via {@code flatMap}, mirroring {@code create}/{@code
 * upsert}/{@code replace}.</p>
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.PATCH)
        .flatMap(requestPopulated -> {
            if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                // Not the first attempt: capture retry timing/context for diagnostics.
                documentClientRetryPolicy.updateEndTime();
                requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Without an explicit partition key the cached collection metadata may be stale;
    // wrap the policy so a partition-key mismatch refreshes the cache and retries.
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the create request, dispatches it, and maps the raw service response to a
 * typed resource response. Synchronous failures are converted to error Monos.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Create)
            .flatMap(request -> create(request, requestRetryPolicy))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Without an explicit partition key, guard against stale collection metadata.
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the upsert request, dispatches it, and maps the raw service response to a
 * typed resource response. Synchronous failures are converted to error Monos.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Upsert)
            .flatMap(request -> upsert(request, retryPolicyInstance))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Without an explicit partition key, guard against stale collection metadata.
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates arguments, normalizes the arbitrary payload into a typed Document, and
 * delegates to the Document-based replace path.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document normalized = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, normalized, options, retryPolicyInstance);
    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        // NOTE(review): the document's self link is passed where the other overload
        // passes a collection link — confirm PartitionKeyMismatchRetryPolicy accepts it.
        String collectionLink = document.getSelfLink();
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Replaces a document using its own self link as the target.
 *
 * <p>Bug fix: the failure log message previously said "replacing a database" even
 * though this path replaces a document — corrected to match sibling methods.</p>
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
// Core replace path: serializes the document (recording serialization diagnostics),
// builds the PUT request, attaches partition-key information once the collection is
// resolved, and maps the response to a typed resource response.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Time payload serialization so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
// NOTE: the lambda uses the outer 'request' rather than 'req' — addPartitionKeyInformation
// mutates and returns the same instance, so both refer to the same object.
return requestObs.flatMap(req -> replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options) {
    // Patch has no partition-key-mismatch wrapper; the session-reset policy suffices.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, retryPolicy),
        retryPolicy);
}
// Core patch path: serializes the patch operations (recording serialization
// diagnostics), builds the PATCH request, attaches partition-key information once
// the collection is resolved, and maps the response to a typed resource response.
private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Time the patch-operations serialization so it appears in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// Patch does not carry the document body, so partition-key info can only come from
// the request options (content and object document are passed as null).
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
// NOTE: the lambda uses the outer 'request' rather than 'req' — addPartitionKeyInformation
// mutates and returns the same instance, so both refer to the same object.
return requestObs.flatMap(req -> patch(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    // Delegates to the overload that can carry a document snapshot; none is available here.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, null, options, retryPolicy),
        retryPolicy);
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    // The supplied object node lets the internal path derive the partition key.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, internalObjectNode, options, retryPolicy),
        retryPolicy);
}
// Core delete path: builds the DELETE request, attaches partition-key information
// (derived from the request options or the supplied object node) once the
// collection is resolved, and maps the response to a typed resource response.
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs);
return requestObs.flatMap(req -> this
.delete(req, retryPolicyInstance)
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
// Core point-read path: builds the GET request, attaches partition-key information
// (from the request options) once the collection is resolved, and maps the response
// to a typed resource response.
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Reading a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Document, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
// NOTE: the lambda reads the outer 'request' rather than 'req' — addPartitionKeyInformation
// mutates and returns the same instance, so both refer to the same object.
return requestObs.flatMap(req -> {
return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) {
    // Reading all documents is implemented as a full-scan query over the collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
// Point-reads a batch of items by (id, partition key) pairs. Resolves the
// collection and its routing map, groups the identities by the partition key range
// that owns each effective partition key, issues one query per range, then merges
// all pages into a single FeedResponse (aggregating request charge and metrics).
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
CosmosQueryRequestOptions options,
Class<T> klass) {
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
// Routing map lookup is needed to translate partition key values into ranges.
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap =
new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
// Bucket each requested identity under the range owning its effective partition key.
itemIdentityList
.forEach(itemIdentity -> {
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// One SQL query per range, matching only the identities routed to that range.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
collection.getPartitionKey());
return createReadManyQuery(
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
options,
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap))
.collectList()
.map(feedList -> {
// Merge all per-range pages: concatenate results, sum request charge,
// and keep the first query-metrics entry per partition.
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponse(finalList, headers);
return frp;
});
});
}
);
}
/**
 * Builds one SQL query per partition-key range. When the partition key path is the
 * document id itself, a simpler id-only IN query is generated.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    boolean pkIsId = partitionKeySelector.equals("[\"id\"]");
    Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, identities) -> {
        SqlQuerySpec spec = pkIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(identities, partitionKeySelector)
            : createReadManyQuerySpec(identities, partitionKeySelector);
        queriesByRange.put(range, spec);
    });
    return queriesByRange;
}
/**
 * Builds a "SELECT * FROM c WHERE c.id IN ( ... )" query for the case where the
 * partition key path is the id itself, so matching on id alone is sufficient.
 * Entries whose partition-key value does not equal their id are skipped.
 *
 * <p>Bug fix: the previous implementation appended the "," separator based on the
 * loop index, so when a skipped entry was followed (or preceded) by appended ones
 * the query could end with a dangling comma — e.g. "IN ( @param0,  )" — which is
 * invalid SQL. Separators are now emitted only between parameters actually added.</p>
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();
    queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
    boolean firstAppended = true;
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);
        String idValue = itemIdentity.getId();
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // This query shape is only valid when id and partition key value coincide.
        if (!Objects.equals(idValue, pkValue)) {
            continue;
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        if (!firstAppended) {
            queryStringBuilder.append(", ");
        }
        queryStringBuilder.append(idParamName);
        firstAppended = false;
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds a parameterized disjunction query matching each (id, partition key) pair:
 * SELECT * FROM c WHERE ( (c.id = @p1 AND  c[pk] = @p0 ) OR ... ).
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    final int count = itemIdentities.size();
    for (int i = 0; i < count; i++) {
        CosmosItemIdentity identity = itemIdentities.get(i);
        // Two parameters per identity: even index for the pk value, odd for the id.
        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName, ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        query.append("(")
             .append("c.id = ")
             .append(idParamName)
             .append(" AND ")
             .append(" c")
             .append(partitionKeySelector)
             .append((" = "))
             .append(pkParamName)
             .append(" )");
        if (i < count - 1) {
            query.append(" OR ");
        }
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds a quoted property selector such as ["a"]["b"] from the partition key
 * definition's paths, for appending after "c" in generated SQL (e.g. c["a"]["b"]).
 *
 * <p>Bug fix: an embedded double quote in a path segment was previously replaced
 * with a lone backslash ({@code "\""} became {@code "\\"}), dropping the quote and
 * corrupting the selector. It is now escaped as {@code \"} so the quoted segment
 * remains well-formed.</p>
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        .map(pathPart -> StringUtils.substring(pathPart, 1))          // drop the leading '/'
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\\"")) // escape embedded quotes
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
/*
 * Executes a read-many operation: the per-partition sub-queries in
 * rangeQueryMap are handed to a read-many query execution context, and the
 * resulting contexts are flattened into a single Flux of feed responses.
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    // Fresh activity id correlates all requests issued for this read-many call.
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum);
    return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Queries documents with a raw query string; delegates to the
 * {@link SqlQuerySpec} overload with no parameters.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) {
    return queryDocuments(collectionLink, new SqlQuerySpec(query), options);
}
/*
 * Adapts this client to the IDocumentQueryClient interface expected by the
 * query pipeline, delegating caches, retry policy and query execution to the
 * enclosing RxDocumentClientImpl instance.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // Account-level default consistency as reported by the gateway.
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // Client-configured consistency override (may be null).
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            return RxDocumentClientImpl.this.query(request).single();
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // NOTE(review): deliberately returns null — read-feed is apparently
            // not supported through this adapter; confirm no caller relies on it.
            return null;
        }
    };
}
/**
 * Queries documents with a parameterized query spec via the generic
 * document-query pipeline.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
/**
 * Reads the change feed of the given collection using the supplied change
 * feed options; execution is handled by {@code ChangeFeedQueryImpl}.
 */
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        Document.class,
        collection.getSelfLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQueryImpl.executeAsync();
}
/**
 * Reads all documents of a single logical partition by issuing a partition-key
 * scoped scan query.
 *
 * Flow: resolve the collection, build a logical-partition scan query from the
 * collection's partition key definition, then (under an invalid-partition
 * retry policy) look up the routing map, resolve the single physical range
 * owning the partition key, and run the query scoped to that range.
 */
@Override
public Flux<FeedResponse<Document>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }
    // Placeholder request used only for collection resolution and diagnostics.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );
    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            throw new IllegalStateException("Collection cannot be null");
        }
        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = Utils.randomUUID();
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
        // Defensive copy so caller-supplied options are not mutated below.
        final CosmosQueryRequestOptions effectiveOptions =
            ModelBridgeInternal.createQueryRequestOptions(options);
        // Retries on partition-map staleness (splits/merges) by refreshing caches.
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            effectiveOptions);
        return ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        throw new IllegalStateException("Failed to get routing map.");
                    }
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);
                    // A logical partition maps to exactly one physical range.
                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                    return createQueryInternal(
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.partitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        Document.class,
                        ResourceType.Document,
                        queryClient,
                        activityId);
                });
            },
            invalidPartitionExceptionRetryPolicy);
    });
}
/** Returns the shared cache of query execution plans keyed by query text. */
@Override
public ConcurrentMap<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}
/**
 * Reads the partition key range feed of a collection.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
/*
 * Validates the inputs and assembles a service request targeting the
 * stored-procedures feed of the given collection for the given operation.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
/*
 * Validates the inputs and assembles a service request targeting the
 * user-defined-functions feed of the given collection for the given operation.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, path, udf, headers, options);
}
/**
 * Creates a stored procedure; each call runs under a fresh session-token-reset
 * retry policy (policies are stateful, so they are never shared).
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
// Builds and issues the Create request for a stored procedure; synchronous
// failures while building are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a stored procedure; runs under a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
// Builds and issues the Upsert request for a stored procedure; synchronous
// failures while building are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Upsert);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a stored procedure in place (addressed by its self link); runs
 * under a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
// Validates, builds and issues the Replace request for a stored procedure,
// addressed by the resource's own self link; synchronous failures are
// surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure by link; runs under a fresh session-token-reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}
// Builds and issues the Delete request for a stored procedure; synchronous
// failures while building are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure by link; runs under a fresh session-token-reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read request for a stored procedure; synchronous
// failures while building are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the stored-procedure feed of a collection.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class,
        Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}
/** Queries stored procedures with a raw query string; delegates to the spec overload. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options);
}
/** Queries stored procedures with a parameterized query spec. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
/** Executes a stored procedure with default request options. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    List<Object> procedureParams) {
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
/**
 * Executes a stored procedure with explicit request options; runs under a
 * fresh session-token-reset retry policy.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy);
}
/**
 * Executes a transactional batch request; runs under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<TransactionalBatchResponse> executeBatchRequest(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy);
}
/**
 * Executes a stored procedure via an {@code OperationType.ExecuteJavaScript}
 * request: serializes the parameters (if any) as the request body, lets the
 * retry policy observe the request, resolves partition key information, then
 * dispatches the call and captures the returned session token.
 *
 * Synchronous failures while building the request are surfaced as an error Mono.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // Fix: dispatch the request emitted by addPartitionKeyInformation (req)
        // instead of the captured local, so the request carrying the resolved
        // partition-key headers is the one actually sent and session-tracked.
        return reqObs.flatMap(req -> create(req, retryPolicy)
            .map(response -> {
                this.captureSessionToken(req, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Builds the batch document request, dispatches it as a Create, and parses
// the service response into a TransactionalBatchResponse; synchronous
// failures are surfaced as an error Mono rather than thrown.
private Mono<TransactionalBatchResponse> executeBatchRequestInternal(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    DocumentClientRetryPolicy requestRetryPolicy,
    boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy));
        return responseObservable
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Creates a trigger; runs under a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Create request for a trigger; synchronous failures
// while building are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a trigger; runs under a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Upsert request for a trigger; synchronous failures
// while building are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Upsert);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/*
 * Validates the inputs and assembles a service request targeting the
 * triggers feed of the given collection for the given operation.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
        trigger, headers, options);
}
/**
 * Replaces a trigger in place (addressed by its self link); runs under a
 * fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
}
// Validates, builds and issues the Replace request for a trigger, addressed
// by the resource's own self link; synchronous failures are surfaced as an
// error Mono rather than thrown.
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a trigger by link; runs under a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Delete request for a trigger; synchronous failures
// while building are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a trigger by link; runs under a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read request for a trigger; synchronous failures
// while building are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the trigger feed of a collection.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.Trigger, Trigger.class,
        Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}
/** Queries triggers with a raw query string; delegates to the spec overload. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
}
/** Queries triggers with a parameterized query spec. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function; runs under a fresh session-token-reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Create request for a user-defined function;
// synchronous failures are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user-defined function; runs under a fresh session-token-reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Upsert request for a user-defined function;
// synchronous failures are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Upsert);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a user-defined function in place (addressed by its self link);
 * runs under a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}
// Validates, builds and issues the Replace request for a user-defined
// function, addressed by the resource's own self link; synchronous failures
// are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        String path = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user-defined function by link; runs under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Delete request for a user-defined function;
// synchronous failures are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user-defined function by link; runs under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and dispatches the Read request for a user defined function.
// Errors (including argument validation) are emitted through the returned Mono.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
CosmosQueryRequestOptions options) {
    // Feed-read of all UDFs under a collection. Note: unlike the point operations,
    // validation here throws synchronously (consistent with the other readXxxs methods).
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
        Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
String query, CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec and delegate.
    return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Delegates to the shared query pipeline for the UserDefinedFunction resource type.
    return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Reads a conflict resource; a fresh per-request retry policy drives the call.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and dispatches the Read request for a conflict resource.
// Conflicts are partitioned, so partition-key information must be resolved
// (asynchronously) before the request can be sent; errors are emitted via the Mono.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // Use the request emitted by the partition-key pipeline (req), not the
            // captured local, so we always dispatch exactly what the pipeline produced.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
    // Feed-read of all conflicts under a collection; validation throws synchronously.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.Conflict, Conflict.class,
        Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec and delegate.
    return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
    // Delegates to the shared query pipeline for the Conflict resource type.
    return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // Deletes a conflict resource; a fresh per-request retry policy drives the call.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and dispatches the Delete request for a conflict resource.
// Partition-key information is resolved asynchronously before dispatch;
// errors (including validation) are emitted via the returned Mono.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // Use the request emitted by the partition-key pipeline (req), not the
            // captured local, so we always dispatch exactly what the pipeline produced.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // Creates a user; the same retry policy instance drives the call and its retries.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Builds (via getUserRequest) and dispatches the Create request for a user.
// getUserRequest performs the argument validation; failures surface via the Mono.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Upserts a user; a fresh per-request retry policy drives the call.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds (via getUserRequest) and dispatches the Upsert request for a user.
// Errors, including validation performed inside getUserRequest, are emitted via the Mono.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Validates the arguments and assembles a service request targeting the users
// feed of the given database for the supplied operation type.
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    String usersFeedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.User, usersFeedPath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Replaces a user (addressed via its self link); per-request retry policy drives the call.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and dispatches the Replace request for a user, addressed by the
// resource's own self link. Errors are emitted via the returned Mono.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        String path = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user; a fresh per-request retry policy drives the call and its retries.
 * Added the {@code @Override} annotation for consistency with every sibling
 * interface method (readUser, replaceUser, deletePermission, ...) so the compiler
 * verifies the signature against the interface.
 */
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and dispatches the Delete request for a user.
// Validation failures are surfaced as an error Mono rather than a thrown exception.
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, path, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // Reads a user; a fresh per-request retry policy drives the call.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and dispatches the Read request for a user.
// Errors (including argument validation) are emitted via the returned Mono.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, path, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
    // Feed-read of all users under a database; validation throws synchronously.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return readFeed(options, ResourceType.User, User.class,
        Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec and delegate.
    return queryUsers(databaseLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
    // Delegates to the shared query pipeline for the User resource type.
    return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/**
 * Creates a permission under the given user link.
 * <p>
 * Fix: the retry driver previously received a SECOND, freshly created retry policy
 * ({@code this.resetSessionTokenRetryPolicy.getRequestPolicy()}) while the operation
 * itself used {@code documentClientRetryPolicy}, splitting retry bookkeeping across
 * two unrelated policy instances. Every sibling method (e.g. createUser) reuses the
 * same instance for both roles; this now does too.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Builds (via getPermissionRequest) and dispatches the Create request for a permission.
// getPermissionRequest performs the argument validation; failures surface via the Mono.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
RequestOptions options) {
    // Upserts a permission; a fresh per-request retry policy drives the call.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds (via getPermissionRequest) and dispatches the Upsert request for a permission.
// Errors, including validation inside getPermissionRequest, are emitted via the Mono.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Validates the arguments and assembles a service request targeting the
// permissions feed of the given user for the supplied operation type.
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    String permissionsFeedPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, permissionsFeedPath, permission, headers, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    // Replaces a permission (addressed via its self link); per-request retry policy drives the call.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and dispatches the Replace request for a permission, addressed by the
// resource's own self link. Errors are emitted via the returned Mono.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        String path = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Deletes a permission; a fresh per-request retry policy drives the call.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and dispatches the Delete request for a permission.
// Validation failures are surfaced as an error Mono rather than a thrown exception.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // Reads a permission; a fresh per-request retry policy drives the call.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and dispatches the Read request for a permission.
// Errors (including argument validation) are emitted via the returned Mono.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) {
    // Feed-read of all permissions under a user; validation throws synchronously.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    return readFeed(options, ResourceType.Permission, Permission.class,
        Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec and delegate.
    return queryPermissions(userLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
    // Delegates to the shared query pipeline for the Permission resource type.
    return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Replaces an offer (throughput resource); the same retry policy instance
    // drives the call and its retries.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Builds and dispatches the Replace request for an offer, addressed by the
// resource's self link. No custom headers/options are attached for offers.
// Errors are emitted via the returned Mono.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String path = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, path, offer, null, null);
        return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // Reads an offer; a fresh per-request retry policy drives the call.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
// Builds and dispatches the Read request for an offer. The explicit
// (HashMap<String, String>) null cast disambiguates the intended create() overload.
// Errors are emitted via the returned Mono.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String path = Utils.joinPath(offerLink, null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
    // Feed-read of all offers; offers live at the account level so there is no parent link.
    return readFeed(options, ResourceType.Offer, Offer.class,
        Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
// Generic paginated feed-read over any resource type. Builds per-page ReadFeed
// requests (carrying the continuation token and page size) and lets the Paginator
// drive the page-by-page execution. A new retry policy is created per page request.
private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
    // Normalize a null options object so downstream code never has to null-check.
    if (options == null) {
        options = new CosmosQueryRequestOptions();
    }
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
    // -1 means "service default page size".
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;
    // Effectively-final copy so the options can be captured by the lambda below.
    final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
    // Factory for the request of a single page: (continuationToken, pageSize) -> request.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
        return request;
    };
    // Executor for one page request, wrapped with its own retry policy instance.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
        .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
            this.resetSessionTokenRetryPolicy.getRequestPolicy());
    return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec and delegate.
    return queryOffers(new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Offers are account-scoped, hence the null parent resource link.
    return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Reads the account metadata from the service; per-request retry policy drives the call.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
        documentClientRetryPolicy);
}
@Override
public DatabaseAccount getLatestDatabaseAccount() {
    // Returns the most recently cached account snapshot; no network call is made here.
    return this.globalEndpointManager.getLatestDatabaseAccount();
}
// Builds and dispatches the Read request for the database account (root resource,
// empty path). The (HashMap<String, String>) null cast disambiguates the intended
// create() overload. Errors are emitted via the returned Mono.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Exposes the session-token container. Typed as Object to keep the
// SessionContainer type out of the public surface.
public Object getSession() {
    return this.sessionContainer;
}
// Replaces the session-token container. The argument MUST be a SessionContainer;
// the unchecked cast will throw ClassCastException otherwise.
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
@Override
public RxClientCollectionCache getCollectionCache() {
    // Accessor for the collection metadata cache.
    return this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    // Accessor for the partition key range cache.
    return partitionKeyRangeCache;
}
// Reads the database account metadata from a SPECIFIC endpoint (used by the global
// endpoint manager when probing regions). Always goes through the gateway proxy.
// As a side effect, updates useMultipleWriteLocations from the returned account
// combined with the client's connection policy. Deferred so the request is built
// per subscription.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeaders(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                // Route this probe to the explicitly supplied endpoint.
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    // Prefer the root cause in the warning when one is present.
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the request being dispatched
 * @return RxStoreModel the proxy (gateway or direct store) that must handle it
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit gateway opt-in on the request always wins.
    if (request.UseGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // Offers, partition key ranges, and all script operations other than
    // ExecuteJavaScript are gateway-only regardless of the operation.
    boolean nonExecuteScript = resourceType.isScript() && operationType != OperationType.ExecuteJavaScript;
    if (resourceType == ResourceType.Offer
        || nonExecuteScript
        || resourceType == ResourceType.PartitionKeyRange) {
        return this.gatewayProxy;
    }
    switch (operationType) {
        case Create:
        case Upsert:
            // Master-resource writes go through gateway.
            if (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection
                || resourceType == ResourceType.Permission) {
                return this.gatewayProxy;
            }
            return this.storeModel;
        case Delete:
            if (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            }
            return this.storeModel;
        case Replace:
        case Read:
            return resourceType == ResourceType.DocumentCollection
                ? this.gatewayProxy
                : this.storeModel;
        default:
            // Queries over collection children with no resolved partition target
            // (no range identity and no partition key header) must be fanned out
            // by the gateway.
            if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery)
                && Utils.isCollectionChild(resourceType)
                && request.getPartitionKeyRangeIdentity() == null
                && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
            return this.storeModel;
    }
}
@Override
public void close() {
    // Idempotent shutdown: the closed flag is flipped atomically so the teardown
    // sequence runs at most once even under concurrent close() calls.
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
@Override
public ItemDeserializer getItemDeserializer() {
    // Accessor for the configured item deserializer.
    return this.itemDeserializer;
}
// Builds a parameterized query that scans a single logical partition:
//   SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue
// The partition key value is bound as @pkValue rather than inlined into the text.
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
PartitionKey partitionKey,
String partitionKeySelector) {
    final String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
    // Resolves the collection, then maps every overlapping partition key range
    // (across the full key space) into a FeedRange.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
        request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            throw new IllegalStateException("Collection cannot be null");
        }
        // forceRefresh=true so stale cached ranges are not returned.
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
        return valueHolderMono.map(RxDocumentClientImpl::toFeedRanges);
    });
}
// Unwraps the value holder and converts each partition key range into a FeedRange.
// Throws IllegalStateException when the held list is null.
private static List<FeedRange> toFeedRanges(
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder) {
    final List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        throw new IllegalStateException("PartitionKeyRange list cannot be null");
    }
    List<FeedRange> feedRanges = new ArrayList<>();
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
/** Wraps a partition key range's id in its feed-range representation. */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    final String pkRangeId = pkRange.getId();
    return new FeedRangePartitionKeyRangeImpl(pkRangeId);
}
} |
Fixed - this was an attempted band-aid for a bug in the feed-range continuation processing. But, as band-aids ultimately always do, it failed to fully mitigate the problem. Finally took the time to fix this properly. | private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
// Stamp the request date first - the authorization token below is derived from it.
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Attach an authorization header whenever any credential source is configured.
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
// Token is transmitted URL-encoded; UTF-8 is supported on every JVM, so the catch is effectively unreachable.
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
// POST/PUT default to the JSON content type unless the caller already set one.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
// PATCH uses the JSON-patch media type instead.
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (this.requiresFeedRangeFiltering(request)) {
// NOTE(review): this version clamps the request to the intersection of the continuation
// token's range and the feed range's effective range before populating filtering headers.
final Range<String> continuationRange = request.getContinuationRange();
if (continuationRange != null &&
!continuationRange.equals(PartitionKeyInternalHelper.FullRange)) {
Mono<Range<String>> getEffectiveRangeTask = request.getFeedRange().getEffectiveRange(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request));
return getEffectiveRangeTask
.flatMap(feedRangeRange -> {
// Disjoint continuation and feed ranges can never yield results - surfaced as NotFound.
if (!Range.checkOverlapping(
continuationRange,
feedRangeRange)) {
return Mono.error(new NotFoundException(
String.format("Incompatible continuation token range '%s - %s' and feed range '%s - %s'.",
continuationRange.getMin(),
continuationRange.getMax(),
feedRangeRange.getMin(),
feedRangeRange.getMax())));
}
Range.MinComparator<String> minComparator = new Range.MinComparator<>();
Range.MaxComparator<String> maxComparator = new Range.MaxComparator<>();
boolean isMaxInclusive;
boolean isMinInclusive;
String effectiveMax;
String effectiveMin;
// Effective min is the larger of the two minimums (carrying its inclusivity) ...
if (minComparator.compare(continuationRange, feedRangeRange) > 0) {
effectiveMin = continuationRange.getMin();
isMinInclusive = continuationRange.isMinInclusive();
} else {
effectiveMin = feedRangeRange.getMin();
isMinInclusive = feedRangeRange.isMinInclusive();
}
// ... and effective max the smaller of the two maximums, i.e. the range intersection.
if (maxComparator.compare(continuationRange, feedRangeRange) <= 0) {
effectiveMax = continuationRange.getMax();
isMaxInclusive = continuationRange.isMaxInclusive();
} else {
effectiveMax = feedRangeRange.getMax();
isMaxInclusive = feedRangeRange.isMaxInclusive();
}
final Range<String> effectiveRange =
new Range<>(effectiveMin, effectiveMax, isMinInclusive, isMaxInclusive);
// Reuse the request's feed range when the intersection did not narrow it.
final FeedRangeInternal effectiveFeedRange =
effectiveRange.equals(feedRangeRange)
? request.getFeedRange() : new FeedRangeEpkImpl(effectiveRange);
return effectiveFeedRange
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
});
}
// No continuation (or a full-range continuation): filter by the request's feed range as-is.
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
} | final Range<String> continuationRange = request.getContinuationRange(); | private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
// Stamp the request date first - the authorization token below is derived from it.
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Attach an authorization header whenever any credential source is configured.
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
// Token is transmitted URL-encoded; UTF-8 is supported on every JVM, so the catch is effectively unreachable.
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
// POST/PUT default to the JSON content type unless the caller already set one.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
// PATCH uses the JSON-patch media type instead.
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
// NOTE(review): the continuation-range clamping of the previous version is gone here -
// the feed range alone drives the filtering headers.
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// --- static bookkeeping shared across all client instances ---
// Count of currently-live clients (incremented in the core constructor, used for diagnostics).
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
// Source of per-instance client ids.
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Range spanning the entire effective partition key space.
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// --- immutable per-instance configuration captured at construction ---
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// --- credential state (credential may be synthesized from the master key in the ctor) ---
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
// --- mutable runtime state, wired by the constructor and init() ---
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
/**
 * Public constructor without an explicit {@link TokenCredential}: delegates to the
 * permission-feed constructor and then installs the custom authorization token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
// Resolver is assigned after delegation because the delegated ctor does not accept it.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Public constructor accepting both an {@link AzureKeyCredential} and a {@link TokenCredential}:
 * delegates to the permission-feed constructor and then installs the token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
// Resolver is assigned after delegation because the delegated ctor does not accept it.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Permission-feed constructor: delegates core setup, then builds the resource-token map
 * keyed by resource id/full-name, each entry holding (partition key, token) pairs.
 *
 * @throws IllegalArgumentException if a permission's resource link cannot be parsed or the
 *         resulting token map is empty.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs,
credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
// Validate the resource link has at least one path segment before parsing it.
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
// Group tokens by resource id/full-name; create the bucket on first sight.
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
// A permission without a partition key maps to PartitionKeyInternal.Empty.
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
// Remember the first resource token so it can serve as a fallback credential.
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled) {
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.getAndDecrement();
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
} catch (RuntimeException e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/** Returns the diagnostics client configuration assembled during construction. */
@Override
public DiagnosticsClientConfig getConfig() {
    return this.diagnosticsClientConfig;
}
/** Creates a fresh per-operation diagnostics object bound to this client. */
@Override
public CosmosDiagnostics createDiagnostics() {
    return BridgeInternal.createCosmosDiagnostics(this);
}
/**
 * Creates the gateway configuration reader and derives the effective multi-write flag
 * from the latest known database account. Requires the global endpoint manager to have
 * already fetched a database account.
 *
 * @throws RuntimeException if no database account has been resolved yet.
 */
private void initializeGatewayConfigurationReader() {
    this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
    DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
    if (databaseAccount == null) {
        // Same message is both logged and thrown; keep them in one place.
        final String message = "Client initialization failed."
            + " Check if the endpoint is reachable and if your auth token is valid";
        logger.error(message);
        throw new RuntimeException(message);
    }
    // Multi-write is effective only when both the client policy and the account allow it.
    this.useMultipleWriteLocations =
        this.connectionPolicy.isMultipleWriteRegionsEnabled()
            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Completes client initialization: creates the gateway proxy, resolves the database account,
 * builds the caches and (for direct mode) the TCP store model, then starts client telemetry.
 * Initialization order matters: caches depend on the gateway proxy, and the store model
 * depends on the caches. Closes the client and rethrows on any failure.
 */
public void init() {
try {
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
// GATEWAY mode routes everything through the gateway proxy; otherwise set up direct TCP.
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
// NOTE(review): getLatestDatabaseAccount() could in principle return null here if account
// resolution failed silently - confirm initializeGatewayConfigurationReader() guarantees it.
clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(),
ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(),
connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(),
null, null, httpClient(), connectionPolicy.isClientTelemetryEnabled());
clientTelemetry.init();
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/**
 * Sets up direct (TCP) connectivity: the global address resolver, the store client factory,
 * and finally the server store model. Called from init() only when not in GATEWAY mode.
 */
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled
);
this.createStoreModel(true);
}
/**
 * Adapts this client to the DatabaseAccountManagerInternal interface consumed by the
 * GlobalEndpointManager; every call simply delegates back to the enclosing client.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
/**
 * Factory for the gateway store model; package-private and overridable so tests can
 * substitute their own gateway proxy.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient);
}
/**
 * Builds the gateway HTTP client from the connection policy. When connection sharing
 * across clients is enabled, a process-wide shared instance is returned; otherwise a
 * dedicated fixed-pool client is created and its config recorded for diagnostics.
 */
private HttpClient httpClient() {
    final HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withRequestTimeout(this.connectionPolicy.getRequestTimeout());

    if (connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
    }

    diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig);
    return HttpClient.createFixed(httpClientConfig);
}
/**
 * Creates the direct-mode server store model backed by a new store client.
 * NOTE(review): the {@code subscribeRntbdStatus} parameter is not referenced anywhere in
 * this body - confirm whether it is vestigial or consumed by an overload elsewhere.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
false
);
this.storeModel = new ServerStoreModel(storeClient);
}
/** Account endpoint this client was constructed with. */
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
/** First (highest-priority) write region endpoint, or null if none resolved yet. */
@Override
public URI getWriteEndpoint() {
return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null);
}
/** First (highest-priority) read region endpoint, or null if none resolved yet. */
@Override
public URI getReadEndpoint() {
return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null);
}
/** Effective connection policy (caller-supplied or the direct-mode default). */
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
/** Whether write operations return the resource body in responses. */
@Override
public boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
/** Account-level consistency the client was created with. */
@Override
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
/** Client telemetry instance; populated during {@code init()}. */
@Override
public ClientTelemetry getClientTelemetry() {
return this.clientTelemetry;
}
/** Creates a database, wrapping the internal call with a per-request retry policy. */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the actual create-database request; serialization timings are
 * captured and attached to the request diagnostics. Argument failures are surfaced
 * as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Bracket the JSON serialization so its duration lands in the diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes a database, wrapping the internal call with a per-request retry policy. */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the delete-database request. Argument failures are surfaced as an
 * error Mono rather than thrown.
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads a single database, wrapping the internal call with a per-request retry policy. */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the read-database request. Argument failures are surfaced as an
 * error Mono rather than thrown.
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Enumerates all databases in the account as a paged feed. */
@Override
public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a target resource type to the link used for querying
 * that resource type (root path for account-scoped types, child path segment otherwise).
 *
 * @throws IllegalArgumentException for resource types with no query path.
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
switch (resourceTypeEnum) {
case Database:
return Paths.DATABASES_ROOT;
case DocumentCollection:
return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
case Document:
return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
case Offer:
return Paths.OFFERS_ROOT;
case User:
return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
case Permission:
return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
case Attachment:
return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
case StoredProcedure:
return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
case Trigger:
return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
case UserDefinedFunction:
return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
case Conflict:
return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
default:
throw new IllegalArgumentException("resource type not supported");
}
}
/**
 * Entry point for typed queries: derives the query link from the parent resource,
 * assigns a fresh activity id, and wraps execution in an invalid-partition retry policy
 * so stale partition maps are refreshed transparently.
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
options);
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId),
invalidPartitionExceptionRetryPolicy);
}
/**
 * Builds the query execution context and streams its pages. For pipelined (cross-partition)
 * queries, query info is attached to each page and query-plan diagnostics to the first
 * page only (guarded by an atomic first-response flag).
 */
private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal(
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery , options, resourceLink, false, activityId);
// Tracks whether the page being emitted is the first, across async page emissions.
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) {
queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
return iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
// Query-plan diagnostics are only meaningful once - attach to the first page.
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
});
}
/** Convenience overload: wraps the raw query text in a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
return queryDatabases(new SqlQuerySpec(query), options);
}
/** Runs a database query at the account root. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
/** Creates a collection in a database, wrapping the internal call with a per-request retry policy. */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
DocumentCollection collection, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Creates a collection under the given database link.
// Serialization of the collection body is timed and the timing is attached to the request's
// diagnostics; the session token from the successful response is stored so later
// session-consistent reads observe this write. Argument failures surface as Mono.error.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Measure how long serializing the collection payload takes, for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        // The diagnostics context only exists once the request is created, so attach timing here.
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Remember the session token of the newly created collection.
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    // Run the replace under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
// Replaces an existing collection (addressed by its self link).
// Mirrors createCollectionInternal: serialization is timed for diagnostics and the response's
// session token is stored — but only when the service returned a resource body.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the payload serialization for request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // The resource can be absent (e.g. minimal responses); guard before caching the token.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    // Run the delete under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Deletes the collection addressed by collectionLink. No payload is sent, so there is no
// serialization-diagnostics bookkeeping here (unlike create/replace).
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Populates DELETE headers, refreshes retry bookkeeping when this is a retry, then dispatches.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.DELETE).flatMap(populated -> {
        boolean isRetry = documentClientRetryPolicy.getRetryCount() > 0;
        if (populated.requestContext != null && isRetry) {
            documentClientRetryPolicy.updateEndTime();
            populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
        }
        return getStoreProxy(populated).processMessage(populated);
    });
}
// Populates GET headers, refreshes retry bookkeeping when this is a retry, then dispatches.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.GET).flatMap(populated -> {
        boolean isRetry = documentClientRetryPolicy.getRetryCount() > 0;
        if (populated.requestContext != null && isRetry) {
            documentClientRetryPolicy.updateEndTime();
            populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
        }
        return getStoreProxy(populated).processMessage(populated);
    });
}
// Feed reads always go through the gateway proxy.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    Mono<RxDocumentServiceRequest> populated = populateHeaders(request, RequestVerb.GET);
    return populated.flatMap(req -> gatewayProxy.processMessage(req));
}
// POSTs the query and captures the returned session token before handing the response back.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.POST).flatMap(populated -> {
        Mono<RxDocumentServiceResponse> responseObs = this.getStoreProxy(populated).processMessage(populated);
        return responseObs.map(response -> {
            this.captureSessionToken(populated, response);
            return response;
        });
    });
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    // Run the read under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Reads the collection addressed by collectionLink and maps the service response into a
// typed ResourceResponse. Argument failures surface as Mono.error rather than throwing.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) {
    // A database link is mandatory; its collections are enumerated as a feed beneath it.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedPath);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               CosmosQueryRequestOptions options) {
    // Wrap the raw query text and reuse the SqlQuerySpec overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Direct pass-through to the generic query machinery for the DocumentCollection type.
    return createQuery(
        databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Serializes each stored-procedure argument to JSON and joins them into a JSON array literal.
// JsonSerializable values use the SDK's own serializer; everything else goes through Jackson.
private static String serializeProcedureParams(List<Object> objectArray) {
    final String[] serialized = new String[objectArray.size()];
    int index = 0;
    for (Object param : objectArray) {
        if (param instanceof JsonSerializable) {
            serialized[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                serialized[index] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
        ++index;
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Builds the per-request HTTP headers for the given options/resource/operation combination.
// Client-level defaults (multi-write, consistency level, content-response-on-write) are
// applied first; per-request options then override or extend them.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only the client-wide "return minimal" write preference can apply.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Caller-supplied custom headers are copied first and may be overwritten by headers below.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request contentResponseOnWriteEnabled overrides the client default when set.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    // Optimistic-concurrency etags.
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if(options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Pre/post trigger lists are sent comma-joined.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Legacy throughput options: an explicit offer throughput wins over an offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties path: manual (fixed) and autoscale settings are mutually exclusive.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    return headers;
}
// Resolves the target collection (its partition key definition drives header population),
// then stamps partition key information on the request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request)
        .map(collectionHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
            return request;
        });
}
// Variant that accepts an already-resolved (async) collection lookup; stamps partition key
// information once the collection is available.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionHolder -> {
        DocumentCollection resolvedCollection = collectionHolder.v;
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollection);
        return request;
    });
}
// Determines the effective partition key for the request and stamps it on both the request
// object and the partition-key header. Precedence:
//   1. options carry PartitionKey.NONE  -> the collection's "none" partition key
//   2. options carry an explicit key    -> that key
//   3. no/empty partition key definition -> the empty partition key
//   4. otherwise                         -> extract the value from the document body
// Anything else is an unsupported call (no way to route the request).
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Object objectDoc, RequestOptions options,
    DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Non-partitioned collection.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (contentAsByteBuffer != null) {
            // Rewind: the buffer may already have been read during serialization.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // The extraction is timed and recorded in the request's serialization diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Reads the partition key value out of the document body using the collection's partition
// key definition. Missing values (or an ObjectNode placeholder) map to the "none" key.
// NOTE(review): only the FIRST path of the definition is consulted — assumes a single-path
// partition key; confirm behavior for multi-path/hierarchical definitions.
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    InternalObjectNode document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition != null) {
        String path = partitionKeyDefinition.getPaths().iterator().next();
        List<String> parts = PathParser.getPathParts(path);
        if (parts.size() >= 1) {
            Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts);
            if (value == null || value.getClass() == ObjectNode.class) {
                value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
            }
            if (value instanceof PartitionKeyInternal) {
                return (PartitionKeyInternal) value;
            } else {
                return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
            }
        }
    }
    // No definition or an empty path list: caller treats null as "no extractable key".
    return null;
}
// Builds the service request for a document Create/Upsert: serializes the document (timed
// for diagnostics), creates the request, and asynchronously resolves the collection so the
// partition key can be stamped on the request before it is dispatched.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time the payload serialization for request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Resolve the collection asynchronously, then add partition key routing to the request.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
// Builds the service request for a transactional batch: wraps the pre-serialized batch body,
// records serialization timing for diagnostics, and asynchronously resolves the collection
// so batch routing headers (partition key or partition key range) can be stamped.
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // The batch body was serialized by the caller; only the UTF-8 wrapping is timed here.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
// Stamps routing information (partition key value or partition key range identity) and the
// common batch marker headers onto a batch request, depending on the request flavor.
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if (serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            // Map PartitionKey.NONE onto the collection's "none" partition key.
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(collection.getPartitionKey());
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if (serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        String rangeId = ((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId();
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(rangeId));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    // Markers common to every batch request.
    Map<String, String> headers = request.getHeaders();
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    headers.put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    return request;
}
// Only document ReadFeed requests that carry an explicit feed range need range filtering.
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    if (request.getResourceType() != ResourceType.Document) {
        return false;
    }
    boolean isReadFeed = request.getOperationType() == OperationType.ReadFeed;
    return isReadFeed && request.getFeedRange() != null;
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    // Only AAD tokens are resolved asynchronously here; other auth types are stamped elsewhere.
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    // HttpHeaders variant of the AAD authorization population.
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
// Reports how this client authenticates (e.g. key-based vs. AAD token).
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
// Resolves the authorization token for a request, in precedence order:
//   1. a caller-supplied token resolver,
//   2. a key credential (HMAC signature over the request),
//   3. a single master key / resource token configured on the client,
//   4. the per-resource token map — with DatabaseAccount requests falling back to the
//      first resource token seen in the permission feed.
@Override
public String getUserAuthorizationToken(String resourceName,
    ResourceType resourceType,
    RequestVerb requestVerb,
    Map<String, String> headers,
    AuthorizationTokenType tokenType,
    Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        // Hand the caller an unmodifiable view of the properties to prevent mutation.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // Single resource token configured directly on the client.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
// Maps the wire-format resource type onto the public CosmosResourceType;
// anything unrecognized is treated as SYSTEM.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType resolved = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return resolved != null ? resolved : CosmosResourceType.SYSTEM;
}
// Records the session token returned by the service so that later session-consistent
// requests can replay it.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
// Populates POST headers, refreshes retry bookkeeping when this is a retry, then dispatches.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    return populateHeaders(request, RequestVerb.POST).flatMap(populated -> {
        RxStoreModel storeProxy = this.getStoreProxy(populated);
        boolean isRetry = retryPolicy.getRetryCount() > 0;
        if (populated.requestContext != null && isRetry) {
            retryPolicy.updateEndTime();
            populated.requestContext.updateRetryContext(retryPolicy, true);
        }
        return storeProxy.processMessage(populated);
    });
}
// Upsert is a POST carrying the is-upsert header; the response's session token is captured
// before the response is handed back.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.POST).flatMap(populated -> {
        Map<String, String> headers = populated.getHeaders();
        assert (headers != null);
        headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
        boolean isRetry = documentClientRetryPolicy.getRetryCount() > 0;
        if (populated.requestContext != null && isRetry) {
            documentClientRetryPolicy.updateEndTime();
            populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
        }
        return getStoreProxy(populated).processMessage(populated).map(response -> {
            this.captureSessionToken(populated, response);
            return response;
        });
    });
}
// Populates PUT headers, refreshes retry bookkeeping when this is a retry, then dispatches.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.PUT).flatMap(populated -> {
        boolean isRetry = documentClientRetryPolicy.getRetryCount() > 0;
        if (populated.requestContext != null && isRetry) {
            documentClientRetryPolicy.updateEndTime();
            populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
        }
        return getStoreProxy(populated).processMessage(populated);
    });
}
// Populates PATCH headers, refreshes retry bookkeeping when this is a retry, then dispatches.
// FIX: the Mono returned by populateHeaders was previously discarded (never chained), so the
// PATCH request was dispatched before/without its headers being populated — unlike every
// sibling verb method (create/read/replace/delete/upsert). Chain it with flatMap like them.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.PATCH)
        .flatMap(requestPopulated -> {
            if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                documentClientRetryPolicy.updateEndTime();
                requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Without an explicit partition key, wrap the policy so stale collection caches get retried.
    boolean partitionKeyMissing = options == null || options.getPartitionKey() == null;
    if (partitionKeyMissing) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
// Builds the create request (serialization + partition key resolution), dispatches it, and
// maps the raw service response into a typed document response. Synchronous failures are
// converted into Mono.error.
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
                                                                RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Create)
            .flatMap(request -> create(request, requestRetryPolicy))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Without an explicit partition key, wrap the policy so stale collection caches get retried.
    boolean partitionKeyMissing = options == null || options.getPartitionKey() == null;
    if (partitionKeyMissing) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Core upsert-document implementation: reuses the create-request builder with
 * OperationType.Upsert, sends the request through the retry policy, and maps the raw
 * response to a typed ResourceResponse. Synchronous failures are converted to an
 * error Mono.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance));
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces a document addressed by its link. When no partition key is supplied in the
 * options, the collection link is derived from the document link and the retry policy is
 * wrapped to retry on partition-key mismatch (stale collection cache).
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
// Effectively-final copy required for capture in the lambda below.
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
/**
 * Validates the replace arguments, converts the caller-supplied POJO into a typed
 * {@link Document}, and delegates to the typed replace overload.
 *
 * @throws IllegalArgumentException (as an error Mono) when the link is empty or the
 *         document is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the exception as the final argument so the stack trace is logged,
        // consistent with the other *Internal methods in this class.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using the document object itself (addressed by its self link).
 * When no partition key is supplied, the retry policy is wrapped to retry on
 * partition-key mismatch.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
// NOTE(review): this assigns the DOCUMENT self link to a variable named
// collectionLink and feeds it to PartitionKeyMismatchRetryPolicy — confirm the
// policy expects a document self link here rather than the collection link.
String collectionLink = document.getSelfLink();
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
// Effectively-final copy required for capture in the lambda below.
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
/**
 * Validates the document and delegates to the link-based replace overload using the
 * document's self link.
 *
 * @throws IllegalArgumentException (as an error Mono) when the document is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fixed copy/paste: this method replaces a document, not a database. Also pass
        // the exception so the stack trace is logged, consistent with sibling methods.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core replace implementation: serializes the document (recording serialization
 * diagnostics), builds the Replace request, resolves the target collection, attaches
 * partition-key information, and issues the request through the retry policy.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Serialization is timed so it can be surfaced in the request's diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
// NOTE(review): the lambda ignores its parameter `req` and sends the outer `request`;
// this is only equivalent if addPartitionKeyInformation mutates and returns the same
// instance — confirm before relying on it.
return requestObs.flatMap(req -> replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Applies a set of patch operations to the document addressed by the link, using the
 * session-token reset retry policy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
/**
 * Core patch implementation: serializes the patch operations (recording serialization
 * diagnostics), builds the Patch request, resolves the target collection, attaches
 * partition-key information, and issues the request through the retry policy.
 * Unlike create/replace, the patch payload carries no document body, so no content or
 * document object is passed to addPartitionKeyInformation.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Serialization is timed so it can be surfaced in the request's diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
// NOTE(review): the lambda ignores its parameter `req` and sends the outer `request`;
// equivalent only if addPartitionKeyInformation returns the same instance — confirm.
return requestObs.flatMap(req -> patch(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Deletes the document addressed by the link; delegates to the overload that accepts an
 * optional InternalObjectNode (passing null here).
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Deletes the document addressed by the link, additionally supplying the item body so
 * partition-key information can be derived from it during request construction.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy),
requestRetryPolicy);
}
/**
 * Core delete implementation: validates the link, builds the Delete request, resolves
 * the target collection, attaches partition-key information (derived from
 * {@code internalObjectNode} when provided), and issues the request through the retry
 * policy. Synchronous failures are converted to an error Mono.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs);
        return requestObs.flatMap(req -> this
            .delete(req, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        // Pass the exception as the final argument so the stack trace is logged,
        // consistent with the other *Internal methods in this class.
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the document addressed by the link, using the session-token reset retry policy.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Core read implementation: validates the link, builds the Read request, resolves the
 * target collection, attaches partition-key information, and issues the request through
 * the retry policy. Synchronous failures are converted to an error Mono.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            // NOTE(review): onBeforeSendRequest was already invoked above; this second
            // invocation is kept to preserve existing behavior — confirm whether it is
            // intentional (sibling methods invoke it only once).
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        // Pass the exception as the final argument so the stack trace is logged,
        // consistent with the other *Internal methods in this class.
        logger.debug("Failure in reading a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all documents in the collection by issuing an unfiltered "SELECT * FROM r" query.
 */
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
/**
 * Reads a batch of items identified by (id, partition key) pairs in a single logical
 * operation. The items are grouped by the partition-key range that owns each item's
 * effective partition key, one SQL query is generated per range, the per-range queries
 * are executed, and the pages are flattened into a single FeedResponse with aggregated
 * request charge and query metrics.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
CosmosQueryRequestOptions options,
Class<T> klass) {
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
// Bucket each requested item under the partition-key range that owns it.
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap =
new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
itemIdentityList
.forEach(itemIdentity -> {
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// One SQL spec per range; the dummy top-level query is replaced per range.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
collection.getPartitionKey());
return createReadManyQuery(
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
options,
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap))
.collectList()
.map(feedList -> {
// Flatten all pages: aggregate charge and query metrics, convert each
// Document into the caller's type.
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponse(finalList, headers);
return frp;
});
});
}
);
}
/**
 * Builds one SQL query spec per partition-key range for a readMany call. When the
 * partition-key path is exactly "id", the simpler id-only IN query is used; otherwise
 * each item is matched on its (id, partition key) pair.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final String pkSelector = createPkSelector(partitionKeyDefinition);
    final boolean partitionKeyIsId = "[\"id\"]".equals(pkSelector);
    Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, identities) -> {
        SqlQuerySpec spec = partitionKeyIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector)
            : createReadManyQuerySpec(identities, pkSelector);
        queriesByRange.put(range, spec);
    });
    return queriesByRange;
}
/**
 * Builds the optimized readMany query for collections whose partition key is the item
 * id: {@code SELECT * FROM c WHERE c.id IN ( @param0, ... )}. Items whose id does not
 * equal their partition-key value are skipped (they cannot match in such a collection).
 *
 * Fix: the previous version appended the separator before knowing whether later items
 * would be skipped, so a skipped item (especially the last one) left a dangling
 * {@code ", "} and produced invalid SQL. Parameter names are now collected first and
 * joined afterwards, which cannot leave stray separators.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    List<String> idParamNames = new ArrayList<>();
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);
        String idValue = itemIdentity.getId();
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // Only items whose id equals their partition-key value belong in this query.
        if (!Objects.equals(idValue, pkValue)) {
            continue;
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        idParamNames.add(idParamName);
    }
    String queryText = "SELECT * FROM c WHERE c.id IN ( "
        + String.join(", ", idParamNames)
        + " )";
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Builds the general readMany query: one parameterized
 * {@code (c.id = @id AND c[pkPath] = @pk)} clause per requested item, joined with OR.
 * Parameter names are positional: even indices carry partition-key values, odd indices
 * carry ids.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity identity = itemIdentities.get(i);
        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        parameters.add(new SqlParameter(pkParamName, pkValue));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        if (i > 0) {
            query.append(" OR ");
        }
        query.append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the bracketed property selector used to address the partition-key path in the
 * generated SQL, e.g. a definition with paths {@code /a/b} yields {@code ["a"]["b"]}.
 * The leading '/' of each path part is dropped and embedded double quotes are escaped.
 *
 * Fix: the quote escape previously replaced {@code "} with a lone backslash
 * ({@code "\\"}), which dropped the quote entirely and corrupted the selector; it now
 * produces {@code \"} as required inside a double-quoted SQL property name.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        .map(pathPart -> StringUtils.substring(pathPart, 1))          // drop the leading '/'
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\\"")) // escape '"' as '\"'
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
/**
 * Executes a readMany as a set of per-partition-key-range queries: creates the
 * read-many query execution contexts from the precomputed range-to-query map and flat
 * maps their asynchronous execution into a single feed of pages.
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(),
sqlQuery,
rangeQueryMap,
options,
collection.getResourceId(),
parentResourceLink,
activityId,
klass,
resourceTypeEnum);
return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Queries documents with a raw SQL string; delegates to the SqlQuerySpec overload.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) {
return queryDocuments(collectionLink, new SqlQuerySpec(query), options);
}
/**
 * Builds the IDocumentQueryClient adapter that the query pipeline uses to reach this
 * client's caches, retry-policy factory, consistency configuration, and query execution.
 * Note: the {@code rxDocumentClientImpl} parameter is unused — the anonymous class
 * captures the enclosing instance directly via {@code RxDocumentClientImpl.this}.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
return RxDocumentClientImpl.this.query(request).single();
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
// NOTE(review): read-feed is not implemented by this adapter and returns null —
// confirm that no code path invokes readFeedAsync on this client.
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
return null;
}
};
}
/**
 * Queries documents with a parameterized SQL spec via the generic query pipeline.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
/**
 * Queries the change feed of the given collection. Validation aside, the work is
 * delegated to a dedicated ChangeFeedQueryImpl instance.
 */
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(
    final String collectionLink,
    final CosmosChangeFeedRequestOptions changeFeedOptions) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return new ChangeFeedQueryImpl<Document>(this, ResourceType.Document,
        Document.class, collectionLink, changeFeedOptions).executeAsync();
}
/**
 * Reads all documents of a single logical partition: resolves the collection, builds a
 * partition-scoped scan query from the partition-key definition, locates the owning
 * partition-key range via the routing map, and executes the query against that range
 * only. Wrapped in an InvalidPartitionExceptionRetryPolicy so a stale collection cache
 * triggers a refresh-and-retry.
 */
@Override
public Flux<FeedResponse<Document>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
// Copy the caller's options so the per-range mutation below does not leak out.
final CosmosQueryRequestOptions effectiveOptions =
ModelBridgeInternal.createQueryRequestOptions(options);
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
effectiveOptions);
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
// Locate the single range owning this partition key and restrict the
// query to it.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
resourceLink,
querySpec,
ModelBridgeInternal.partitionKeyRangeIdInternal(effectiveOptions, range.getId()),
Document.class,
ResourceType.Document,
queryClient,
activityId);
});
},
invalidPartitionExceptionRetryPolicy);
});
}
/**
 * Reads the partition-key-range feed of the given collection.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
/**
 * Validates the inputs and builds a service request of the given operation type
 * targeting the stored-procedures feed of the collection.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers =
        this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
/**
 * Validates the inputs and builds a service request of the given operation type
 * targeting the user-defined-functions feed of the collection.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers =
        this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, path, udf, headers, options);
}
/**
 * Creates a stored procedure in the given collection, using the session-token reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Core stored-procedure create: builds the Create request, notifies the retry policy,
 * issues the request, and maps the response. Synchronous failures become an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Create);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a stored procedure in the given collection, using the session-token reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Core stored-procedure upsert: builds the Upsert request, notifies the retry policy,
 * issues the request, and maps the response. Synchronous failures become an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces a stored procedure (addressed by its self link), using the session-token
 * reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Core stored-procedure replace: validates the resource, builds the Replace request
 * against the procedure's self link, notifies the retry policy, issues the request, and
 * maps the response. Synchronous failures become an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
RxDocumentClientImpl.validateResource(storedProcedure);
String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a stored procedure addressed by its link, using the session-token reset retry
 * policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Core stored-procedure delete: validates the link, builds the Delete request, notifies
 * the retry policy, issues the request, and maps the response. Synchronous failures
 * become an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads a stored procedure addressed by its link, using the session-token reset retry
 * policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the READ request for a stored procedure; errors surface via Mono.error.
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
CosmosQueryRequestOptions options) {
    // Read the stored-procedure feed of a collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec and delegate.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, options);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Run a parameterized query over the collection's stored procedures.
    return createQuery(
        collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
List<Object> procedureParams) {
    // Overload without RequestOptions; delegates with null options.
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams) {
    // Execute a stored procedure under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
@Override
public Mono<TransactionalBatchResponse> executeBatchRequest(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
    // Execute a transactional batch under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
// Executes a stored procedure (server-side JavaScript) and captures the session token
// from the response. Request-construction failures surface via Mono.error.
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
// Parameters are serialized into the request body; an empty body is sent when there are none.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda uses the outer 'request' rather than 'req'; this assumes
// addPartitionKeyInformation mutates and returns the same instance — confirm.
return reqObs.flatMap(req -> create(request, retryPolicy)
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private Mono<TransactionalBatchResponse> executeBatchRequestInternal(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
DocumentClientRetryPolicy requestRetryPolicy,
boolean disableAutomaticIdGeneration) {
// Builds the batch document request, sends it, and parses the service response
// into a TransactionalBatchResponse. Failures surface via Mono.error.
try {
logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy));
return responseObservable
.map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
} catch (Exception ex) {
logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
return Mono.error(ex);
}
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
    // Create a trigger under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the CREATE request for a trigger; errors surface via Mono.error.
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
    // Upsert a trigger under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the UPSERT request for a trigger; errors surface via Mono.error.
try {
logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Upsert);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
OperationType operationType) {
    // Validate inputs and assemble an RxDocumentServiceRequest targeting the
    // collection's trigger feed for the given operation type.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);

    final String triggersPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Trigger, triggersPath, trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // Replace a trigger under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the REPLACE request for a trigger, addressed by the trigger's
// self link; errors surface via Mono.error.
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    // Delete a trigger under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the DELETE request for a trigger; errors surface via Mono.error.
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    // Read a trigger under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the READ request for a trigger; errors surface via Mono.error.
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
    // Read the trigger feed of a collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedPath);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec and delegate.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, options);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
    // Run a parameterized query over the collection's triggers.
    return createQuery(
        collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
    // Create a UDF under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the CREATE request for a user-defined function; errors surface via Mono.error.
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
    // Upsert a UDF under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the UPSERT request for a user-defined function; errors surface via Mono.error.
try {
logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Upsert);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
RequestOptions options) {
    // Replace a UDF under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the REPLACE request for a user-defined function, addressed by the
// UDF's self link; errors surface via Mono.error.
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
RequestOptions options) {
    // Delete a UDF under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the DELETE request for a user-defined function; errors surface via Mono.error.
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
RequestOptions options) {
    // Read a UDF under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the READ request for a user-defined function; errors surface via Mono.error.
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
CosmosQueryRequestOptions options) {
    // Read the user-defined-function feed of a collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
String query, CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec and delegate.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, querySpec, options);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Run a parameterized query over the collection's user-defined functions.
    return createQuery(
        collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Read a conflict under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds the READ request for a conflict, resolves partition-key information first,
// then sends; errors surface via Mono.error.
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda uses the outer 'request' rather than 'req'; this assumes
// addPartitionKeyInformation mutates and returns the same instance — confirm.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
    // Read the conflict feed of a collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Conflict, Conflict.class, feedPath);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec and delegate.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, querySpec, options);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
    // Run a parameterized query over the collection's conflicts.
    return createQuery(
        collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // Delete a conflict under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
// Builds the DELETE request for a conflict, resolves partition-key information first,
// then sends; errors surface via Mono.error.
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda uses the outer 'request' rather than 'req'; this assumes
// addPartitionKeyInformation mutates and returns the same instance — confirm.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // Create a user under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
// Builds and issues the CREATE request for a user; errors surface via Mono.error.
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Upsert a user under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the UPSERT request for a user; errors surface via Mono.error.
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
    // Validate inputs and assemble an RxDocumentServiceRequest targeting the
    // database's user feed for the given operation type.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    final String usersPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, usersPath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Replace a user under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the REPLACE request for a user, addressed by the user's self link;
// errors surface via Mono.error.
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Delete a user under a session-token-reset retry policy.
// FIX: added the missing @Override annotation — every sibling interface method
// (createUser, upsertUser, replaceUser, readUser) carries it, and its absence here
// hides accidental signature drift from the compiler.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the DELETE request for a user; errors surface via Mono.error.
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // Read a user under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
// Builds and issues the READ request for a user; errors surface via Mono.error.
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
    // Read the user feed of a database.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.User, User.class, feedPath);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec and delegate.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, options);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
    // Run a parameterized query over the database's users.
    return createQuery(
        databaseLink, querySpec, options, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
RequestOptions options) {
    // Create a permission under a session-token-reset retry policy.
    // FIX: previously a second, distinct retry policy instance was passed to
    // inlineIfPossibleAsObs while createPermissionInternal received another one, so the
    // policy driving the retries never observed the request via onBeforeSendRequest.
    // Both must be the same instance, as in every sibling method.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
/**
 * Core implementation of creating a Permission: builds the Create request via
 * the shared permission-request helper and maps the raw response.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());

        final RxDocumentServiceRequest createRequest =
            getPermissionRequest(userLink, permission, options, OperationType.Create);

        return this.create(createRequest, documentClientRetryPolicy)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception error) {
        logger.debug("Failure in creating a Permission due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Upserts a Permission under the given user; the same retry-policy instance
 * drives both the pre-send hook and the retry loop.
 */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance),
        retryPolicyInstance);
}
/**
 * Core implementation of upserting a Permission: builds the Upsert request,
 * applies the retry-policy pre-send hook and maps the raw response.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());

        final RxDocumentServiceRequest upsertRequest =
            getPermissionRequest(userLink, permission, options, OperationType.Upsert);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(upsertRequest);
        }

        return this.upsert(upsertRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception error) {
        logger.debug("Failure in upserting a Permission due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Builds the service request shared by permission Create/Upsert operations.
 * Validates arguments and the permission resource before constructing it.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);

    final String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, path, permission, headers, options);
}
/**
 * Replaces an existing Permission (addressed by its self link).
 */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicyInstance),
        retryPolicyInstance);
}
/**
 * Core implementation of replacing a Permission: validates the resource,
 * builds the Replace request from the permission's self link, applies the
 * retry-policy pre-send hook and maps the raw response.
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);

        final String path = Utils.joinPath(permission.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, path, permission, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }

        return this.replace(replaceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception error) {
        logger.debug("Failure in replacing a Permission due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Deletes the Permission addressed by the given link.
 */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicyInstance),
        retryPolicyInstance);
}
/**
 * Core implementation of deleting a Permission: validates the link, builds the
 * Delete request, applies the retry-policy pre-send hook and maps the raw
 * response.
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);

        final String path = Utils.joinPath(permissionLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        final RxDocumentServiceRequest deleteRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, path, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(deleteRequest);
        }

        return this.delete(deleteRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception error) {
        logger.debug("Failure in deleting a Permission due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Reads the Permission addressed by the given link.
 */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicyInstance),
        retryPolicyInstance);
}
/**
 * Core implementation of reading a Permission: validates the link, builds the
 * Read request, applies the retry-policy pre-send hook and maps the raw
 * response.
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);

        final String path = Utils.joinPath(permissionLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, path, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }

        return this.read(readRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception error) {
        logger.debug("Failure in reading a Permission due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Reads the feed of Permissions under the given user link.
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    final String permissionsPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Permission, Permission.class, permissionsPath);
}
/**
 * Queries Permissions with a raw SQL string; delegates to the SqlQuerySpec
 * overload.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       CosmosQueryRequestOptions options) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryPermissions(userLink, querySpec, options);
}
/**
 * Queries Permissions with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       CosmosQueryRequestOptions options) {
    return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
/**
 * Replaces an Offer (throughput resource).
 */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy), retryPolicy);
}
/**
 * Core implementation of replacing an Offer: validates the resource, builds
 * the Replace request from the offer's self link and maps the raw response.
 * Offer requests carry no extra headers or options.
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);

        final String path = Utils.joinPath(offer.getSelfLink(), null);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Offer, path, offer, null, null);

        return this.replace(replaceRequest, documentClientRetryPolicy)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception error) {
        logger.debug("Failure in replacing an Offer due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Reads the Offer addressed by the given link.
 */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    final DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Core implementation of reading an Offer: validates the link, builds the
 * Read request (no headers/options for offers), applies the retry-policy
 * pre-send hook and maps the raw response.
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);

        final String path = Utils.joinPath(offerLink, null);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>) null, null);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }

        return this.read(readRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception error) {
        logger.debug("Failure in reading an Offer due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Reads the feed of all Offers in the account.
 */
@Override
public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
    final String offersPath = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return readFeed(options, ResourceType.Offer, Offer.class, offersPath);
}
/**
 * Generic ReadFeed paging pipeline shared by readUsers/readPermissions/readOffers.
 * Builds one ReadFeed request per page (carrying the continuation token and page
 * size as headers) and hands request construction + execution to the Paginator,
 * which drives continuation until the feed is exhausted.
 *
 * @param options      query options; a default instance is substituted when null
 * @param resourceType the feed's resource type (User/Permission/Offer/...)
 * @param klass        element type used to deserialize each page
 * @param resourceLink feed link the ReadFeed request targets
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
    if (options == null) {
        options = new CosmosQueryRequestOptions();
    }
    // -1 signals "no explicit page size" to the paginator.
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;
    // Effectively-final copy so the lambda below can capture the options.
    final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
    // Per-page request factory: continuation token is only attached when present.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
        return request;
    };
    // Each page execution gets its own fresh retry policy.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
        .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
            this.resetSessionTokenRetryPolicy.getRequestPolicy());
    return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/**
 * Queries Offers with a raw SQL string; delegates to the SqlQuerySpec overload.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryOffers(querySpec, options);
}
/**
 * Queries Offers with a parameterized query spec; offers are account-level so
 * no parent resource link is supplied.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/**
 * Reads the DatabaseAccount from the service, with retry.
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy), retryPolicy);
}
/**
 * Returns the most recently cached DatabaseAccount from the global endpoint
 * manager without issuing a network call.
 */
@Override
public DatabaseAccount getLatestDatabaseAccount() {
    return this.globalEndpointManager.getLatestDatabaseAccount();
}
/**
 * Core implementation of reading the DatabaseAccount: issues a Read against
 * the account root ("") and maps the raw response.
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        final RxDocumentServiceRequest accountRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(accountRequest, documentClientRetryPolicy)
            .map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception error) {
        logger.debug("Failure in getting Database Account due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Exposes the session-token container; typed as Object to match the
 * AsyncDocumentClient surface.
 */
public Object getSession() {
    return this.sessionContainer;
}
/**
 * Replaces the session-token container; the argument must be a
 * SessionContainer (the cast throws ClassCastException otherwise).
 */
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
/**
 * Returns the collection metadata cache (populated in init()).
 */
@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}
/**
 * Returns the partition-key-range cache (populated in init()).
 */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
/**
 * Reads the DatabaseAccount from a specific endpoint (used by the global
 * endpoint manager during region discovery). The request is built lazily per
 * subscription, always routed through the gateway proxy with the endpoint
 * overridden, and as a side effect refreshes the useMultipleWriteLocations
 * flag from the returned account.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeaders(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                // Route this probe to the requested regional endpoint, not the default.
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    // Multi-write is enabled only when both the policy and the account allow it.
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Chooses the transport for a request: the gateway proxy or the direct store
 * model. Certain requests must be routed through gateway even when the client
 * connectivity mode is direct — master-resource writes/deletes, offers,
 * non-execute script operations, partition-key-range reads, collection
 * reads/replaces, and cross-partition queries that carry neither a partition
 * key nor a partition-key-range identity. The branch order below is
 * load-bearing: earlier checks take precedence.
 *
 * @param request the request to route
 * @return RxStoreModel the transport to use for this request
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit per-request override always wins.
    if (request.UseGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // Offers, scripts (except execution) and PK-range metadata are gateway-only.
    if (resourceType == ResourceType.Offer ||
        resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
        resourceType == ResourceType.PartitionKeyRange) {
        return this.gatewayProxy;
    }
    if (operationType == OperationType.Create
        || operationType == OperationType.Upsert) {
        // Master-resource creation goes through gateway; data-plane writes go direct.
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection ||
            resourceType == ResourceType.Permission) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Delete) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Replace) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Read) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else {
        // Queries without PK or PK-range targeting need the gateway's routing.
        if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) &&
            Utils.isCollectionChild(request.getResourceType())) {
            if (request.getPartitionKeyRangeIdentity() == null &&
                request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
        }
        return this.storeModel;
    }
}
/**
 * Shuts the client down exactly once: releases the endpoint manager, the
 * store client factory and the HTTP client, and unregisters from the CPU
 * monitor. Subsequent calls only log a warning.
 */
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (closed.getAndSet(true)) {
        logger.warn("Already shutdown!");
        return;
    }
    logger.info("Shutting down ...");
    logger.info("Closing Global Endpoint Manager ...");
    LifeCycleUtils.closeQuietly(this.globalEndpointManager);
    logger.info("Closing StoreClientFactory ...");
    LifeCycleUtils.closeQuietly(this.storeClientFactory);
    logger.info("Shutting down reactorHttpClient ...");
    LifeCycleUtils.closeQuietly(this.reactorHttpClient);
    logger.info("Shutting down CpuMonitor ...");
    CpuMemoryMonitor.unregister(this);
    logger.info("Shutting down completed.");
}
/**
 * Returns the JSON item deserializer used for typed item responses.
 */
@Override
public ItemDeserializer getItemDeserializer() {
    return this.itemDeserializer;
}
/**
 * Builds a parameterized query that scans a single logical partition:
 * "SELECT * FROM c WHERE c{selector} = @pkValue".
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    final String pkParamName = "@pkValue";
    final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);

    final List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));

    final String queryText =
        "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Resolves the container's current feed ranges: resolves the collection from
 * the cache, then asks the partition-key-range cache for all ranges
 * overlapping the full PK range and maps each to a FeedRange.
 *
 * @throws IllegalArgumentException when collectionLink is null or empty
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    // Resolve the collection metadata (rid) needed to look up its PK ranges.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
        request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            throw new IllegalStateException("Collection cannot be null");
        }
        // forceRefresh=true so stale split/merge information is not returned.
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
        return valueHolderMono.map(RxDocumentClientImpl::toFeedRanges);
    });
}
/**
 * Converts a resolved list of partition key ranges into feed ranges.
 *
 * @throws IllegalStateException when the held list is null
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder) {
    final List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        throw new IllegalStateException("PartitionKeyRange list cannot be null");
    }
    final List<FeedRange> feedRanges = new ArrayList<>();
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
/**
 * Wraps a partition key range id in a FeedRange implementation.
 */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangePartitionKeyRangeImpl(pkRange.getId());
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// ---- Static client bookkeeping -------------------------------------------
// Count of live client instances across the process (incremented in the ctor,
// exposed to diagnostics).
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
// Source of per-client ids (the ctor takes getAndDecrement of this counter).
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Range [min, max) covering every effective partition key — used to fetch all
// PK ranges of a container (see getFeedRanges).
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
    PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
    PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
// Placeholder query text required by ParallelDocumentQueryExecutionContext
// construction; never sent to the service.
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
    "ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// ---- Immutable configuration captured at construction --------------------
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
// True when masterKeyOrResourceToken is a resource token rather than a key.
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// ---- Credentials (exactly one auth mode ends up active) ------------------
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
// ---- Mutable state populated during init() -------------------------------
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Resource-token map keyed by resource id/full name (resource-token auth only).
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private ConcurrentMap<String, PartitionedQueryExecutionInfo> queryPlanCache;
// Guards close() so shutdown runs at most once.
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
/**
 * Convenience constructor (no TokenCredential): delegates to the main private
 * constructor and then installs the custom authorization-token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled) {
    this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
        credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Convenience constructor (with TokenCredential): delegates to the main
 * private constructor and then installs the custom authorization-token
 * resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            TokenCredential tokenCredential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled) {
    this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
        credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Private constructor that, after base initialization, builds the
 * resource-token map from a permission feed (resource-token authentication).
 * Each permission's resource link is parsed and its token is indexed by
 * resource id/full name, paired with the permission's partition key (or the
 * empty partition key when none is set). The first resource token in the feed
 * is remembered as a fallback token.
 *
 * @throws IllegalArgumentException when a resource link has no segments,
 *         cannot be parsed, or the resulting token map is empty
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled) {
    this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs,
        credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled);
    if (permissionFeed != null && permissionFeed.size() > 0) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length <= 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // Group tokens by the resource they grant access to.
            partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
            if (partitionKeyAndResourceTokenPairs == null) {
                partitionKeyAndResourceTokenPairs = new ArrayList<>();
                this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
            }
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if(this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember a fallback token for requests that match no specific resource.
        String firstToken = permissionFeed.get(0).getToken();
        if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Base constructor: captures configuration, selects the authentication mode
 * (AzureKeyCredential, resource token, master key, or AAD TokenCredential),
 * and wires the diagnostics config, session container, user agent, HTTP
 * client, global endpoint manager and retry policy. On any failure the
 * partially-built client is closed before rethrowing.
 *
 * Fix: the AAD token scope string was truncated to an unterminated literal
 * ({@code serviceEndpoint.getScheme() + ":}) — the {@code //} of the scope URL
 * had been stripped as if it were a line comment. Restored to the standard
 * {@code scheme://host/.default} AAD scope format.
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     AzureKeyCredential credential,
                     TokenCredential tokenCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled,
                     boolean contentResponseOnWriteEnabled) {
    activeClientsCnt.incrementAndGet();
    this.clientId = clientIdGenerator.getAndDecrement();
    this.diagnosticsClientConfig = new DiagnosticsClientConfig();
    this.diagnosticsClientConfig.withClientId(this.clientId);
    this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
    this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
    this.diagnosticsClientConfig.withConsistency(consistencyLevel);
    logger.info(
        "Initializing DocumentClient [{}] with"
            + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
        this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
    try {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        this.configs = configs;
        this.masterKeyOrResourceToken = masterKeyOrResourceToken;
        this.serviceEndpoint = serviceEndpoint;
        this.credential = credential;
        this.tokenCredential = tokenCredential;
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        this.authorizationTokenType = AuthorizationTokenType.Invalid;
        // Authentication mode selection: explicit key credential > resource
        // token > master key string > AAD token credential.
        if (this.credential != null) {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.authorizationTokenProvider = null;
            hasAuthKeyResourceToken = true;
            this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
        } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = null;
            if (tokenCredential != null) {
                // AAD scope in the standard "{scheme}://{host}/.default" form.
                this.tokenCredentialScopes = new String[] {
                    serviceEndpoint.getScheme() + "://" + serviceEndpoint.getHost() + "/.default"
                };
                this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                    .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                this.authorizationTokenType = AuthorizationTokenType.AadToken;
            }
        }
        if (connectionPolicy != null) {
            this.connectionPolicy = connectionPolicy;
        } else {
            this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        }
        this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
        this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
        this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
        // Session capturing is only on for SESSION consistency unless overridden.
        boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
        this.consistencyLevel = consistencyLevel;
        this.userAgentContainer = new UserAgentContainer();
        String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
        if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
            userAgentContainer.setSuffix(userAgentSuffix);
        }
        this.reactorHttpClient = httpClient();
        this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, configs);
        this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
        this.resetSessionTokenRetryPolicy = retryPolicy;
        CpuMemoryMonitor.register(this);
        this.queryPlanCache = new ConcurrentHashMap<>();
    } catch (RuntimeException e) {
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
/**
 * Returns the diagnostics client configuration built in the constructor.
 */
@Override
public DiagnosticsClientConfig getConfig() {
    return diagnosticsClientConfig;
}
/**
 * Creates a fresh CosmosDiagnostics bound to this client.
 */
@Override
public CosmosDiagnostics createDiagnostics() {
    return BridgeInternal.createCosmosDiagnostics(this);
}
/**
 * Creates the gateway configuration reader and derives the multi-write flag
 * from the cached DatabaseAccount; fails fast when no account could be
 * retrieved (endpoint unreachable or bad auth).
 */
private void initializeGatewayConfigurationReader() {
    this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
    final DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
    if (databaseAccount == null) {
        final String failureMessage = "Client initialization failed."
            + " Check if the endpoint is reachable and if your auth token is valid";
        logger.error(failureMessage);
        throw new RuntimeException(failureMessage);
    }
    this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
        && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Second-phase initialization (network-touching). Order matters: the gateway
 * proxy must exist before the endpoint manager initializes, the collection
 * cache depends on the gateway proxy, and the session-token retry policy and
 * PK-range cache depend on the collection cache. Direct connectivity is only
 * wired when the connection mode is not GATEWAY. Any failure closes the
 * client and rethrows.
 */
public void init() {
    try {
        this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
            this.consistencyLevel,
            this.queryCompatibilityMode,
            this.userAgentContainer,
            this.globalEndpointManager,
            this.reactorHttpClient);
        this.globalEndpointManager.init();
        this.initializeGatewayConfigurationReader();
        this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
        // Upgrade the retry policy now that the collection cache exists.
        this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
        this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
            collectionCache);
        if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
            this.storeModel = this.gatewayProxy;
        } else {
            this.initializeDirectConnectivity();
        }
        clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(),
            ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(),
            connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(),
            null, null, httpClient(), connectionPolicy.isClientTelemetryEnabled());
        clientTelemetry.init();
        this.queryPlanCache = new ConcurrentHashMap<>();
    } catch (Exception e) {
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
/**
 * Sets up direct-mode connectivity: a global address resolver (maps partitions to
 * replica addresses), a store client factory, and the server store model.
 * Called from {@code init()} only when the connection mode is not GATEWAY;
 * requires collectionCache and partitionKeyRangeCache to already be initialized.
 */
private void initializeDirectConnectivity() {
    this.addressResolver = new GlobalAddressResolver(this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        null,
        this.connectionPolicy);
    this.storeClientFactory = new StoreClientFactory(
        this.addressResolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled
    );
    this.createStoreModel(true);
}
/**
 * Adapts this client to the {@code DatabaseAccountManagerInternal} interface by
 * delegating every call back to the enclosing {@code RxDocumentClientImpl}.
 * Used by the global endpoint manager to refresh account topology.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }
        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
/**
 * Factory for the gateway store model. Package-private and non-static so tests can
 * override it to inject a different gateway proxy.
 *
 * @return a new {@link RxGatewayStoreModel} wired to the supplied collaborators
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient) {
    final RxGatewayStoreModel gatewayStoreModel = new RxGatewayStoreModel(
        this,
        sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient);
    return gatewayStoreModel;
}
/**
 * Builds (or reuses) the gateway HTTP client according to the connection policy.
 * When connection sharing across clients is enabled, a process-wide shared client
 * is returned; otherwise a dedicated fixed client is created and its configuration
 * is recorded on the diagnostics client config.
 */
private HttpClient httpClient() {
    final HttpClientConfig config = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withRequestTimeout(this.connectionPolicy.getRequestTimeout());
    if (!connectionSharingAcrossClientsEnabled) {
        // Dedicated client: record its config for diagnostics, then build it.
        diagnosticsClientConfig.withGatewayHttpClientConfig(config);
        return HttpClient.createFixed(config);
    }
    return SharedGatewayHttpClient.getOrCreateInstance(config, diagnosticsClientConfig);
}
/**
 * Creates the direct-mode (server) store model from the store client factory.
 *
 * NOTE(review): the {@code subscribeRntbdStatus} parameter is never read in this body,
 * and {@code false} is passed as the last factory argument unconditionally — confirm
 * whether the parameter was meant to be forwarded there.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
            this.addressResolver,
            this.sessionContainer,
            this.gatewayConfigurationReader,
            this,
            false
    );
    this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the endpoint this client was configured with. */
@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}
/** Returns the first available write endpoint, or null if none is known yet. */
@Override
public URI getWriteEndpoint() {
    return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null);
}
/** Returns the first available read endpoint, or null if none is known yet. */
@Override
public URI getReadEndpoint() {
    return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null);
}
/** Returns the connection policy supplied at construction time. */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
/** Whether write operations return the full resource payload in the response. */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return contentResponseOnWriteEnabled;
}
/** Returns the client-level consistency level (may be null to use the account default). */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return consistencyLevel;
}
/** Returns the client telemetry instance created during {@code init()}. */
@Override
public ClientTelemetry getClientTelemetry() {
    return this.clientTelemetry;
}
/**
 * Creates a database, retrying per the reset-session-token retry policy.
 *
 * @param database the database definition to create (must be non-null)
 * @param options  request options; may be null
 * @return a Mono emitting the created database's resource response
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the create-database request. Serialization is timed so the
 * duration can be attached to the request's diagnostics.
 *
 * @throws IllegalArgumentException (via Mono.error) if {@code database} is null
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
        // Time the JSON serialization for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        // Attach serialization timing to the request's diagnostics, if present.
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Synchronous validation/serialization failures surface as an error signal.
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a database by link, retrying per the reset-session-token retry policy.
 *
 * @param databaseLink the database self-link or alt-link (must be non-empty)
 * @param options      request options; may be null
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/** Builds and issues the delete-database request for the given link. */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Synchronous failures surface as an error signal rather than a thrown exception.
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a database by link, retrying per the reset-session-token retry policy.
 *
 * @param databaseLink the database self-link or alt-link (must be non-empty)
 * @param options      request options; may be null
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, policy),
        policy);
}
/** Builds and issues the read-database request for the given link. */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        final String path = Utils.joinPath(databaseLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.Database, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Synchronous failures surface as an error signal rather than a thrown exception.
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all databases in the account as a paged feed. */
@Override
public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
    return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a target resource type to the feed link that
 * queries of that type must be issued against. Root-scoped types (Database, Offer)
 * ignore the parent link.
 *
 * @throws IllegalArgumentException for resource types that cannot be queried this way
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case DocumentCollection:
            return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:
            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case Offer:
            return Paths.OFFERS_ROOT;
        case User:
            return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
        case Permission:
            return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:
            return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:
            return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:
            return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction:
            return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        case Conflict:
            return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
}
/**
 * Entry point for SQL queries against any queryable resource type. Resolves the
 * feed link for the target type, then runs the query under an
 * InvalidPartitionException retry policy (handles collection-recreate scenarios).
 *
 * @param parentResourceLink link of the parent resource (ignored for root-scoped types)
 * @param sqlQuery           the query to execute
 * @param options            query request options
 * @param klass              resource class the results deserialize to
 * @param resourceTypeEnum   the resource type being queried
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    // One activity id per logical query, shared by all pages/retries.
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        options);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId),
        invalidPartitionExceptionRetryPolicy);
}
/**
 * Builds the query execution context (single-partition, cross-partition pipelined,
 * etc.) and executes it, decorating each page with query info and — for the first
 * page only — the query-plan diagnostics.
 */
private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal(
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId) {
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery,
                                                      options, resourceLink, false, activityId,
                                                      Configs.isQueryPlanCachingEnabled(), queryPlanCache);
    // Tracks whether the current page is the first one emitted, so query-plan
    // diagnostics are attached exactly once.
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        // Query info is only available on the pipelined (cross-partition) context.
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) {
            queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }
        QueryInfo finalQueryInfo = queryInfo;
        return iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    // compareAndSet guarantees the diagnostics land on exactly one page
                    // even under concurrent emission.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
    });
}
/** Queries databases using a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
    return queryDatabases(new SqlQuerySpec(query), options);
}
/** Queries databases at the account root. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
/**
 * Creates a collection in the given database, retrying per the
 * reset-session-token retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the create-collection request. On success, the response's
 * session token is recorded so subsequent session-consistent reads see the new
 * collection.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                            DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Time the JSON serialization for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Capture the session token keyed by both rid and alt-link.
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a collection's definition, retrying per the reset-session-token
 * retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the replace-collection request. On success (and only when a
 * resource body is returned), the response's session token is recorded.
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the JSON serialization for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Unlike create, the resource may be absent (minimal response) — guard it.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a collection by link, retrying per the reset-session-token retry policy.
 *
 * @param collectionLink the collection self-link or alt-link (must be non-empty)
 * @param options        request options; may be null
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    final DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, policy),
        policy);
}
/** Builds and issues the delete-collection request for the given link. */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.DocumentCollection, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Synchronous failures surface as an error signal rather than a thrown exception.
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Issues a DELETE through the store proxy, recording retry context when retrying. */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return processWithRetryContext(request, RequestVerb.DELETE, documentClientRetryPolicy);
}
/** Issues a GET through the store proxy, recording retry context when retrying. */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return processWithRetryContext(request, RequestVerb.GET, documentClientRetryPolicy);
}
/**
 * Shared implementation for {@code delete} and {@code read} (previously duplicated):
 * populates headers for the given verb, attaches retry diagnostics to the request
 * context when this is a retry attempt, and dispatches to the store proxy.
 */
private Mono<RxDocumentServiceResponse> processWithRetryContext(RxDocumentServiceRequest request,
                                                                RequestVerb verb,
                                                                DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, verb)
        .flatMap(requestPopulated -> {
            // Only annotate the retry context on actual retries (count > 0).
            if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                documentClientRetryPolicy.updateEndTime();
                requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
/** Reads a feed page; always routed through the gateway proxy. */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.GET)
        .flatMap(gatewayProxy::processMessage);
}
/** Executes a query (POST) and captures the response's session token. */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(requestPopulated ->
            this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                .map(response -> {
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }
            ));
}
/**
 * Reads a collection by link, retrying per the reset-session-token retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/** Builds and issues the read-collection request for the given link. */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Synchronous failures surface as an error signal rather than a thrown exception.
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all collections under a database as a paged feed.
 *
 * @param databaseLink the parent database link (must be non-empty)
 * @throws IllegalArgumentException if {@code databaseLink} is empty
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedLink);
}
/** Queries collections using a raw query string. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               CosmosQueryRequestOptions options) {
    return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/** Queries collections using a parameterized query spec. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters to a JSON array string, e.g. {@code [1,"a"]}.
 * JsonSerializable values use their own serializer; everything else goes through
 * the shared Jackson mapper.
 *
 * @throws IllegalArgumentException if a parameter cannot be serialized to JSON
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    final String[] serialized = new String[objectArray.size()];
    int index = 0;
    for (Object param : objectArray) {
        if (param instanceof JsonSerializable) {
            serialized[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                serialized[index] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
        ++index;
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Assembles the HTTP request headers for an operation from client-level settings
 * and per-request options. Per-request options override client defaults
 * (consistency level, content-response-on-write). Returns early with only the
 * client-level headers when {@code options} is null.
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    // Client-level consistency; may be overridden by options below.
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only apply the client-wide minimal-response preference.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Caller-supplied custom headers are applied first so the typed options below win.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request content-response-on-write overrides the client default when set.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    // Optimistic-concurrency preconditions.
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if(options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Trigger include lists are sent as comma-separated names.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Explicit offer throughput takes precedence over a named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties path (manual vs. autoscale), only when no explicit throughput was given.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Manual (fixed) throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                    (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                         autoscaleAutoUpgradeProperties != null &&
                             autoscaleAutoUpgradeProperties
                                 .getAutoscaleThroughputProperties()
                                 .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                                                       + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    return headers;
}
/**
 * Async overload: resolves the target collection from the cache, then delegates to
 * the synchronous {@code addPartitionKeyInformation} to stamp the PK header.
 * Returns the same request instance after mutation.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs
        .map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
}
/**
 * Async overload taking an already-resolved collection Mono (avoids a second
 * cache lookup when the caller has one in hand).
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionValueHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
        return request;
    });
}
/**
 * Determines the effective partition key for the request and stamps it both on
 * the request object and as the x-ms-documentdb-partitionkey header.
 *
 * Resolution precedence:
 *  1. explicit PartitionKey.NONE in options,
 *  2. explicit partition key in options,
 *  3. empty PK for non-partitioned collections,
 *  4. extraction from the document body (timed for diagnostics),
 *  otherwise the operation is unsupported without a PK value.
 *
 * @throws UnsupportedOperationException if no partition key can be determined
 * @throws IllegalStateException if neither a byte buffer nor a document object is available
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection is not partitioned.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (contentAsByteBuffer != null) {
            // Rewind: the buffer may already have been consumed by serialization.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // Time the PK extraction for diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    // Header value must be ASCII-safe; escape non-ASCII characters in the JSON form.
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Extracts the partition key value from a document body using the collection's
 * partition key definition. A missing value or an object-typed value maps to the
 * "none" partition key.
 *
 * NOTE(review): only the FIRST path of the definition is consulted
 * ({@code iterator().next()}) — assumes a single-path (non-hierarchical) partition
 * key; confirm against callers if sub-partitioning is ever in scope.
 *
 * @return the extracted partition key, or null if the definition is null or has no parts
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    InternalObjectNode document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition != null) {
        String path = partitionKeyDefinition.getPaths().iterator().next();
        List<String> parts = PathParser.getPathParts(path);
        if (parts.size() >= 1) {
            Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts);
            // Absent value or a JSON object at the PK path => "none" partition key.
            if (value == null || value.getClass() == ObjectNode.class) {
                value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
            }
            if (value instanceof PartitionKeyInternal) {
                return (PartitionKeyInternal) value;
            } else {
                return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
            }
        }
    }
    return null;
}
/**
 * Builds the service request for a document Create/Upsert: serializes the payload
 * (recording serialization timings into the request's diagnostics), creates the
 * request, and resolves the target collection so partition-key information can be
 * attached before the request is sent.
 *
 * @param requestRetryPolicy retry policy notified before the request is sent; may be null.
 * @param documentCollectionLink link of the target collection; must be non-empty.
 * @param document the document payload; must not be null.
 * @param options per-request options used for headers; may be null.
 * @param disableAutomaticIdGeneration forwarded flag; not consumed in this method body.
 * @param operationType Create or Upsert.
 * @return a Mono emitting the request once partition-key information is populated.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Time payload serialization so it can be surfaced via request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.Document, path, requestHeaders, options, content);
// Give the retry policy a chance to capture per-request state before dispatch.
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection (cached) to attach partition-key info to the request.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch: wraps the pre-serialized
 * batch body, records serialization timings in the request diagnostics, and resolves
 * the target collection so batch-specific headers can be attached.
 *
 * @param requestRetryPolicy retry policy notified before the request is sent; may be null.
 * @param documentCollectionLink link of the target collection; must be non-empty.
 * @param serverBatchRequest the batch whose request body is already serialized.
 * @param options per-request options used for headers; may be null.
 * @param disableAutomaticIdGeneration accepted but not used in this method body
 *     (NOTE(review): presumably kept for signature symmetry — confirm).
 * @return a Mono emitting the request once batch headers are populated.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
// The batch body is already serialized; only the UTF-8 wrap is timed here.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Batch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection so the proper PK / PK-range headers can be attached.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
return request;
});
}
/**
 * Attaches batch routing and behavior headers to the request.
 *
 * <p>Single-partition-key batches get the PARTITION_KEY header (mapping
 * {@link PartitionKey#NONE} to the collection's "none" PK); partition-key-range
 * batches get a PK-range identity instead. Any other batch type is rejected.
 *
 * @param request the request being prepared; mutated in place and returned.
 * @param serverBatchRequest the batch describing its routing mode.
 * @param collection the resolved target collection (used for the "none" PK).
 * @return the same request, with batch headers populated.
 * @throws UnsupportedOperationException for unknown batch request types.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
// PartitionKey.NONE must be translated using the collection's PK definition.
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
// Range-scoped batches route by PK-range id instead of a PK value.
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
return request;
}
/**
 * Reports whether this request must be narrowed to a specific feed range.
 * Only document ReadFeed requests that carry an explicit feed range qualify.
 *
 * @param request the outgoing request to inspect.
 * @return true when feed-range filtering is required.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    if (request.getResourceType() != ResourceType.Document) {
        return false;
    }
    // Of all document operations, only ReadFeed honors a feed range here.
    return request.getOperationType() == OperationType.ReadFeed
        && request.getFeedRange() != null;
}
/**
 * Populates the AUTHORIZATION header on the request when AAD-token auth is in use.
 * For all other auth types the request is passed through untouched (their tokens
 * are computed elsewhere).
 *
 * @param request the request to decorate; must not be null.
 * @return a Mono emitting the (possibly decorated) request.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    // Guard clause: non-AAD auth needs no asynchronous token acquisition here.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * Populates the AUTHORIZATION header on a raw header set when AAD-token auth is in
 * use; otherwise the headers are returned unchanged.
 *
 * @param httpHeaders the headers to decorate; must not be null.
 * @return a Mono emitting the (possibly decorated) headers.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    // Guard clause: only AAD auth requires fetching a token here.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/** Returns the authorization token type this client was configured with. */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
return this.authorizationTokenType;
}
/**
 * Resolves the authorization token for a request, trying credential sources in order:
 * a user-supplied token resolver, a key credential, a directly supplied resource
 * token, and finally the per-resource token map.
 *
 * @param resourceName address/name of the target resource.
 * @param resourceType type of the target resource.
 * @param requestVerb HTTP verb used for signature generation.
 * @param headers request headers participating in the signature.
 * @param tokenType requested token type (not consulted in this method body).
 * @param properties extra properties forwarded (read-only) to a custom resolver.
 * @return the authorization token to attach to the request.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
// Custom resolver wins; properties are exposed as an unmodifiable view.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token was supplied directly; use it verbatim.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Database-account access falls back to the first permission-feed token.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
/**
 * Maps an internal {@link ResourceType} onto the public {@link CosmosResourceType},
 * falling back to {@link CosmosResourceType#SYSTEM} when no mapping exists.
 *
 * @param resourceType the internal resource type.
 * @return the matching public resource type, never null.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token carried on the response into the session container,
// so subsequent session-consistency reads can observe this operation.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Sends a POST create request: populates headers, records retry context on
 * re-attempts, then dispatches through the appropriate store proxy.
 *
 * @param request the request to send.
 * @param retryPolicy policy whose retry context is recorded on retries.
 * @return the service response.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> {
            RxStoreModel proxy = this.getStoreProxy(populated);
            // On a retry, stamp the retry context into the request for diagnostics.
            if (populated.requestContext != null && retryPolicy.getRetryCount() > 0) {
                retryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(retryPolicy, true);
            }
            return proxy.processMessage(populated);
        });
}
/**
 * Sends a POST upsert request: sets the IS_UPSERT header, records retry context on
 * re-attempts, dispatches through the store proxy, and captures the session token
 * from the response.
 *
 * @param request the request to send.
 * @param documentClientRetryPolicy policy whose retry context is recorded on retries.
 * @return the service response.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> {
            Map<String, String> headers = populated.getHeaders();
            assert (headers != null);
            // Upsert is a create with this marker header set.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            if (populated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                documentClientRetryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(populated)
                .processMessage(populated)
                .map(response -> {
                    // Capture session token so session reads see this write.
                    this.captureSessionToken(populated, response);
                    return response;
                });
        });
}
/**
 * Sends a PUT replace request: populates headers, records retry context on
 * re-attempts, then dispatches through the store proxy.
 *
 * @param request the request to send.
 * @param documentClientRetryPolicy policy whose retry context is recorded on retries.
 * @return the service response.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.PUT)
        .flatMap(populated -> {
            boolean isRetry = documentClientRetryPolicy.getRetryCount() > 0;
            if (isRetry && populated.requestContext != null) {
                documentClientRetryPolicy.updateEndTime();
                populated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(populated).processMessage(populated);
        });
}
/**
 * Sends a PATCH request: populates headers, records retry context on re-attempts,
 * then dispatches through the store proxy.
 *
 * <p>Fix: {@code populateHeaders} returns a lazy {@code Mono}; the previous code
 * called it fire-and-forget, so the returned Mono was never subscribed and the
 * headers were never actually populated. It must be chained with {@code flatMap},
 * exactly as the sibling {@code create}/{@code upsert}/{@code replace} methods do.
 *
 * @param request the request to send.
 * @param documentClientRetryPolicy policy whose retry context is recorded on retries.
 * @return the service response.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.PATCH)
        .flatMap(requestPopulated -> {
            if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
                documentClientRetryPolicy.updateEndTime();
                requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true);
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
/**
 * Creates a document in the given collection.
 *
 * <p>When no partition key is supplied in the options, the retry policy is wrapped
 * with {@link PartitionKeyMismatchRetryPolicy} so stale partition-key metadata
 * (e.g. after a collection recreate) triggers a retry.
 *
 * @param collectionLink link of the target collection.
 * @param document the document payload.
 * @param options per-request options; may be null.
 * @param disableAutomaticIdGeneration whether the service should reject id-less docs.
 * @return the created document wrapped in a resource response.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    boolean hasPartitionKey = options != null && options.getPartitionKey() != null;
    if (!hasPartitionKey) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds and sends the Create request, converting the raw service response into a
 * typed {@link ResourceResponse}. Synchronous failures while building the request
 * are surfaced as an error Mono rather than thrown.
 *
 * @param collectionLink link of the target collection.
 * @param document the document payload.
 * @param options per-request options; may be null.
 * @param disableAutomaticIdGeneration whether the service should reject id-less docs.
 * @param requestRetryPolicy retry policy applied to the request.
 * @return the created document wrapped in a resource response.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
try {
logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Create);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy));
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
// Convert synchronous construction failures into the reactive error channel.
logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a document in the given collection.
 *
 * <p>When no partition key is supplied in the options, the retry policy is wrapped
 * with {@link PartitionKeyMismatchRetryPolicy} to handle stale collection metadata.
 *
 * @param collectionLink link of the target collection.
 * @param document the document payload.
 * @param options per-request options; may be null.
 * @param disableAutomaticIdGeneration whether the service should reject id-less docs.
 * @return the upserted document wrapped in a resource response.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    boolean hasPartitionKey = options != null && options.getPartitionKey() != null;
    if (!hasPartitionKey) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds and sends the Upsert request, converting the raw service response into a
 * typed {@link ResourceResponse}. Synchronous failures while building the request
 * are surfaced as an error Mono rather than thrown.
 *
 * @param collectionLink link of the target collection.
 * @param document the document payload.
 * @param options per-request options; may be null.
 * @param disableAutomaticIdGeneration whether the service should reject id-less docs.
 * @param retryPolicyInstance retry policy applied to the request.
 * @return the upserted document wrapped in a resource response.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance));
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
// Convert synchronous construction failures into the reactive error channel.
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces a document addressed by its link.
 *
 * <p>When no partition key is supplied, the retry policy is wrapped with
 * {@link PartitionKeyMismatchRetryPolicy} keyed on the collection derived from
 * the document link.
 *
 * @param documentLink link of the document to replace.
 * @param document the replacement payload.
 * @param options per-request options; may be null.
 * @return the replaced document wrapped in a resource response.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
    RequestOptions options) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates arguments, converts the raw payload to a {@link Document}, and delegates
 * to the typed replace path. Synchronous failures are surfaced as an error Mono.
 *
 * <p>Fix: the logger now receives the exception as the trailing argument so the
 * stack trace is recorded, consistent with createDocumentInternal and
 * upsertDocumentInternal.
 *
 * @param documentLink link of the document to replace; must be non-empty.
 * @param document the replacement payload; must not be null.
 * @param options per-request options; may be null.
 * @param retryPolicyInstance retry policy applied to the request.
 * @return the replaced document wrapped in a resource response.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the exception itself so the stack trace is logged, not just the message.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its self link.
 *
 * <p>When no partition key is supplied, the retry policy is wrapped with
 * {@link PartitionKeyMismatchRetryPolicy} keyed on the document's self link.
 *
 * @param document the document to replace (addressed by its self link).
 * @param options per-request options; may be null.
 * @return the replaced document wrapped in a resource response.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates the document and delegates to the link-addressed replace path using the
 * document's self link. Synchronous failures are surfaced as an error Mono.
 *
 * <p>Fix: the failure log previously said "replacing a database" in this
 * document-replace path; corrected to "document". The exception is now also passed
 * as the trailing logger argument so the stack trace is recorded.
 *
 * @param document the document to replace; must not be null.
 * @param options per-request options; may be null.
 * @param retryPolicyInstance retry policy applied to the request.
 * @return the replaced document wrapped in a resource response.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds and sends the Replace request: serializes the document (recording timings
 * in the request diagnostics), resolves the collection, attaches partition-key
 * information, and converts the raw response into a typed {@link ResourceResponse}.
 *
 * <p>Fix: the final {@code flatMap} now uses the request emitted by the chain
 * ({@code req}) instead of silently re-capturing the outer {@code request} variable.
 * Today both refer to the same object, but sending the emitted request is what the
 * chain contract actually guarantees.
 *
 * @param documentLink link of the document to replace.
 * @param document the replacement payload; must not be null.
 * @param options per-request options; may be null.
 * @param retryPolicyInstance retry policy notified before send; may be null.
 * @return the replaced document wrapped in a resource response.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
    // Time payload serialization so it can be surfaced via request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTime,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
    // Send the request emitted by the chain, which carries the partition-key info.
    return requestObs.flatMap(req -> replace(req, retryPolicyInstance)
        .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Applies a set of patch operations to the document addressed by the given link.
 *
 * @param documentLink link of the document to patch.
 * @param cosmosPatchOperations the patch operations to apply.
 * @param options per-request options; may be null.
 * @return the patched document wrapped in a resource response.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Patch request: serializes the patch operations (recording
 * timings in the request diagnostics), resolves the collection, attaches
 * partition-key information, and converts the raw response into a typed
 * {@link ResourceResponse}.
 *
 * @param documentLink link of the document to patch; must be non-empty.
 * @param cosmosPatchOperations the patch operations to apply; must not be null.
 * @param options per-request options; may be null.
 * @param retryPolicyInstance retry policy notified before send; may be null.
 * @return the patched document wrapped in a resource response.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Time patch-body serialization so it can be surfaced via request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// No content/object is passed here; the PK must come from options or the request.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
// NOTE(review): the lambda captures the outer 'request' rather than using 'req';
// they appear to be the same object here, but confirm before relying on it.
return requestObs.flatMap(req -> patch(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Deletes the document addressed by the given link.
 *
 * @param documentLink link of the document to delete.
 * @param options per-request options; may be null.
 * @return the deletion result wrapped in a resource response.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, null, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the document addressed by the given link, using the supplied node to
 * derive partition-key information when available.
 *
 * @param documentLink link of the document to delete.
 * @param internalObjectNode document content used for partition-key extraction.
 * @param options per-request options; may be null.
 * @return the deletion result wrapped in a resource response.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, internalObjectNode, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Delete request: resolves the collection, attaches
 * partition-key information (optionally derived from the supplied node), and
 * converts the raw response into a typed {@link ResourceResponse}. Synchronous
 * failures are surfaced as an error Mono.
 *
 * @param documentLink link of the document to delete; must be non-empty.
 * @param internalObjectNode optional document content for PK extraction; may be null.
 * @param options per-request options; may be null.
 * @param retryPolicyInstance retry policy notified before send; may be null.
 * @return the deletion result wrapped in a resource response.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// internalObjectNode (when supplied) provides the PK value for the request.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs);
return requestObs.flatMap(req -> this
.delete(req, retryPolicyInstance)
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
/**
 * Reads the document addressed by the given link.
 *
 * @param documentLink link of the document to read.
 * @param options per-request options; may be null.
 * @return the document wrapped in a resource response.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Read request: resolves the collection, attaches
 * partition-key information, and converts the raw response into a typed
 * {@link ResourceResponse}. Synchronous failures are surfaced as an error Mono.
 *
 * @param documentLink link of the document to read; must be non-empty.
 * @param options per-request options; may be null.
 * @param retryPolicyInstance retry policy notified before send; may be null.
 * @return the document wrapped in a resource response.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Reading a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Document, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
// NOTE(review): the lambda reads the outer 'request' rather than 'req'; they
// appear to be the same object here, but confirm before relying on it.
return requestObs.flatMap(req -> {
return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
/**
 * Reads all documents in a collection, implemented as an unfiltered
 * {@code SELECT * FROM r} query.
 *
 * @param collectionLink link of the collection to read; must be non-empty.
 * @param options query options; may be null.
 * @return pages of documents.
 */
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
/**
 * Point-read-many: fetches a batch of items identified by (id, partition key)
 * pairs. The identities are grouped by the partition-key range that owns each
 * item's effective partition key; one query per range is issued, and the per-range
 * pages are merged into a single {@link FeedResponse} with aggregated request
 * charge and query metrics.
 *
 * @param itemIdentityList the (id, partition key) pairs to fetch.
 * @param collectionLink link of the target collection.
 * @param options query options (not consulted in this method body).
 * @param klass target deserialization type.
 * @return a single merged feed response containing all found items.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
CosmosQueryRequestOptions options,
Class<T> klass) {
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
// Look up the collection's routing map to resolve PK -> owning PK range.
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap =
new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
// Bucket each identity under the PK range owning its effective PK string.
itemIdentityList
.forEach(itemIdentity -> {
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// Build one SQL query per PK range covering its bucketed identities.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
collection.getPartitionKey());
return createReadManyQuery(
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
options,
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap))
.collectList()
// Merge all per-range pages into one response, summing request charge
// and unioning query metrics.
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponse(finalList, headers);
return frp;
});
});
}
);
}
/**
 * Builds one SQL query per partition-key range from the bucketed identities.
 *
 * <p>When the PK path is exactly {@code id} (selector {@code ["id"]}), a simpler
 * id-only IN query is generated; otherwise each item contributes an
 * (id AND partition-key) predicate.
 *
 * @param partitionRangeItemKeyMap identities bucketed by owning PK range.
 * @param partitionKeyDefinition the collection's partition-key definition.
 * @return a query per PK range.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
PartitionKeyDefinition partitionKeyDefinition) {
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
String partitionKeySelector = createPkSelector(partitionKeyDefinition);
for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) {
SqlQuerySpec sqlQuerySpec;
if (partitionKeySelector.equals("[\"id\"]")) {
sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector);
} else {
sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector);
}
rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
}
return rangeQueryMap;
}
/**
 * Builds an id-only {@code IN} query for the fast path where the partition key is
 * the document id itself. Identities whose id and partition-key value differ are
 * skipped (they cannot use this fast path).
 *
 * <p>Fix: the previous implementation decided whether to append {@code ", "} purely
 * by positional index, so skipping an item via {@code continue} could emit a
 * trailing or dangling separator (e.g. {@code IN ( @param0,  )}) — malformed SQL.
 * The included parameter names are now collected first and joined, which produces
 * a correct list regardless of which items are skipped.
 *
 * @param idPartitionKeyPairList identities to include.
 * @param partitionKeySelector unused on this fast path (id IS the partition key).
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    List<String> idParamNames = new ArrayList<>();
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);
        String idValue = itemIdentity.getId();
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        if (!Objects.equals(idValue, pkValue)) {
            // id and partition key differ; this identity cannot use the id-only path.
            continue;
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        idParamNames.add(idParamName);
    }
    String queryText = "SELECT * FROM c WHERE c.id IN ( "
        + String.join(", ", idParamNames)
        + " )";
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Builds the general read-many query: one {@code (c.id = @idParam AND
 * c[pkSelector] = @pkParam)} disjunct per identity, OR-ed together.
 * Parameter names are {@code @param2i} (partition key) and {@code @param2i+1} (id).
 *
 * @param itemIdentities identities to include.
 * @param partitionKeySelector bracketed partition-key path selector, e.g. ["pk"].
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity identity = itemIdentities.get(i);
        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName, ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        if (i > 0) {
            query.append(" OR ");
        }
        query.append("(c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Converts the partition-key definition's paths into a bracketed selector string,
 * e.g. path {@code /pk} becomes {@code ["pk"]}; multiple paths are concatenated.
 *
 * <p>NOTE(review): embedded double quotes in a path segment are replaced with a
 * single backslash (not {@code \"}) — preserved as-is; confirm this escaping is
 * intentional.
 *
 * @param partitionKeyDefinition the collection's partition-key definition.
 * @return the concatenated bracketed selector.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder selector = new StringBuilder();
    for (String path : partitionKeyDefinition.getPaths()) {
        String part = StringUtils.substring(path, 1);
        part = StringUtils.replace(part, "\"", "\\");
        selector.append("[\"").append(part).append("\"]");
    }
    return selector.toString();
}
/**
 * Creates and executes the per-partition-key-range read-many query pipeline.
 *
 * @param parentResourceLink resource link used as the query's parent.
 * @param sqlQuery placeholder query (the real per-range queries are in rangeQueryMap).
 * @param options query options.
 * @param klass target deserialization type.
 * @param resourceTypeEnum resource type being queried.
 * @param collection the resolved target collection.
 * @param rangeQueryMap one SQL query per targeted PK range.
 * @return pages of results from all targeted ranges.
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(),
sqlQuery,
rangeQueryMap,
options,
collection.getResourceId(),
parentResourceLink,
activityId,
klass,
resourceTypeEnum);
// Each emitted execution context drives its own page stream; flatten them all.
return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Queries documents with a raw query string by delegating to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, options);
}
// Adapts this client to the IDocumentQueryClient interface consumed by the query
// execution pipeline. Each callback simply exposes an existing facility of the
// enclosing RxDocumentClientImpl.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            // Shared collection metadata cache of the enclosing client.
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // Account-level default consistency as reported by the gateway.
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // Consistency level configured on this client instance (may be null).
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            return RxDocumentClientImpl.this.query(request).single();
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // Not supported by this adapter — callers are expected to use executeQueryAsync.
            return null;
        }
    };
}
/**
 * Queries documents in a collection using a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    // Delegate to the shared query pipeline for the Document resource type.
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
/**
 * Executes a change-feed query against the given collection.
 */
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    final ChangeFeedQueryImpl<Document> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        Document.class,
        collection.getSelfLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
// Reads all documents of a single logical partition by translating the read into a
// partition-scoped query: resolve the collection, build a scan query for the given
// partition key, map the key to its owning physical partition-key range, and run the
// query against just that range. Wrapped in an InvalidPartitionExceptionRetryPolicy
// so stale partition metadata triggers a refresh-and-retry.
@Override
public Flux<FeedResponse<Document>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }
    // Placeholder request used only to resolve the collection from the cache.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );
    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            throw new IllegalStateException("Collection cannot be null");
        }
        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        // Query that scans every document whose partition key matches.
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = Utils.randomUUID();
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
        final CosmosQueryRequestOptions effectiveOptions =
            ModelBridgeInternal.createQueryRequestOptions(options);
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            effectiveOptions);
        return ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                // Look up the routing map to locate the physical range owning this key.
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        throw new IllegalStateException("Failed to get routing map.");
                    }
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);
                    // Single range: a logical partition maps to exactly one physical range.
                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                    return createQueryInternal(
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.partitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        Document.class,
                        ResourceType.Document,
                        queryClient,
                        activityId);
                });
            },
            invalidPartitionExceptionRetryPolicy);
    });
}
// Exposes the client's shared query-plan cache so callers can reuse previously
// computed PartitionedQueryExecutionInfo instances.
@Override
public ConcurrentMap<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}
/**
 * Reads the partition-key-range feed of the given collection.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedLink);
}
/**
 * Validates inputs and assembles a service request targeting the
 * stored-procedures feed of the given collection.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers =
        this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
/**
 * Validates inputs and assembles a service request targeting the
 * user-defined-functions feed of the given collection.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers =
        this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, path, udf, headers, options);
}
/**
 * Creates a stored procedure, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create request for a stored procedure; synchronous
// failures are surfaced through Mono.error.
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        final RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a stored procedure, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Upsert request for a stored procedure; synchronous
// failures are surfaced through Mono.error.
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        final RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a stored procedure, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
// Validates the stored procedure, then issues a Replace against its self-link;
// synchronous failures are surfaced through Mono.error.
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        final String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        final Map<String, String> headers =
            getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
// Issues a Delete against the stored procedure's link; synchronous failures are
// surfaced through Mono.error.
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String path = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
// Issues a Read against the stored procedure's link; synchronous failures are
// surfaced through Mono.error.
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String path = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the stored-procedure feed of the given collection.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedLink);
}
/**
 * Queries stored procedures with a raw query string; delegates to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, spec, options);
}
/**
 * Queries stored procedures using a parameterized query spec.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Delegate to the shared query pipeline for the StoredProcedure resource type.
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure without request options; delegates to the
 * three-argument overload.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    List<Object> procedureParams) {
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
/**
 * Executes a stored procedure, retried via the session-token reset policy.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch request, retried via the session-token reset policy.
 */
@Override
public Mono<TransactionalBatchResponse> executeBatchRequest(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Executes a stored procedure via an ExecuteJavaScript request.
 * <p>
 * Procedure parameters are serialized into the request body (empty string when
 * none), the partition-key header is added asynchronously, and the session token
 * from the response is captured before the response is converted.
 *
 * @param storedProcedureLink link of the stored procedure to execute
 * @param options             request options (may be null)
 * @param procedureParams     parameters passed to the procedure (may be null/empty)
 * @param retryPolicy         retry policy notified before the request is sent
 * @return the stored procedure response; synchronous failures surface via Mono.error
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // Fix: use the request instance emitted by addPartitionKeyInformation ('req')
        // instead of the captured outer variable, so the partition-key-enriched request
        // is the one actually sent and used for session-token capture.
        return reqObs.flatMap(req -> create(req, retryPolicy)
            .map(response -> {
                this.captureSessionToken(req, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Sends a server batch request and parses the service response into a
// TransactionalBatchResponse; synchronous failures surface via Mono.error.
private Mono<TransactionalBatchResponse> executeBatchRequestInternal(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    DocumentClientRetryPolicy requestRetryPolicy,
    boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        final Mono<RxDocumentServiceRequest> requestMono =
            getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        return requestMono
            .flatMap(req -> create(req, requestRetryPolicy))
            .map(serviceResponse ->
                BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Creates a trigger, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create request for a trigger; synchronous failures are
// surfaced through Mono.error.
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        final RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a trigger, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Upsert request for a trigger; synchronous failures are
// surfaced through Mono.error.
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        final RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and assembles a service request targeting the triggers feed
 * of the given collection.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    final String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
        trigger, headers, options);
}
/**
 * Replaces a trigger, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
// Validates the trigger, then issues a Replace against its self-link;
// synchronous failures are surfaced through Mono.error.
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        final String path = Utils.joinPath(trigger.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, path, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a trigger, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Issues a Delete against the trigger's link; synchronous failures are surfaced
// through Mono.error.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        final String path = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a trigger, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Issues a Read against the trigger's link; synchronous failures are surfaced
// through Mono.error.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        final String path = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the trigger feed of the given collection.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedLink);
}
/**
 * Queries triggers with a raw query string; delegates to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, options);
}
/**
 * Queries triggers using a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    // Delegate to the shared query pipeline for the Trigger resource type.
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create request for a user-defined function; synchronous
// failures are surfaced through Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        final RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user-defined function, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Upsert request for a user-defined function; synchronous
// failures are surfaced through Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        final RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a user-defined function, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
// Validates the UDF, then issues a Replace against its self-link; synchronous
// failures are surfaced through Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        final String path = Utils.joinPath(udf.getSelfLink(), null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user-defined function, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Issues a Delete against the UDF's link; synchronous failures are surfaced
// through Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        final String path = Utils.joinPath(udfLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user-defined function, retried via the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Issues a Read against the UDF's link; synchronous failures are surfaced
// through Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        final String path = Utils.joinPath(udfLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, path, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all UDFs under a collection as a paged feed. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}
/** Query UDFs from a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
String query, CosmosQueryRequestOptions options) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
}
/** Query UDFs with a parameterized query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/** Reads a Conflict by its link, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Share one policy instance between the call and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core read of a Conflict. Partition-key information is resolved
 * asynchronously before the request is dispatched; synchronous failures are
 * surfaced as Mono.error.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda parameter 'req' is unused and the outer
// 'request' is reused below — presumably addPartitionKeyInformation
// mutates and returns the same instance; confirm they are identical.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all Conflicts under a collection as a paged feed. */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.Conflict, Conflict.class,
Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}
/** Query Conflicts from a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
CosmosQueryRequestOptions options) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}
/** Query Conflicts with a parameterized query spec. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
/** Deletes a Conflict by its link, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core delete of a Conflict. Partition-key information is resolved
 * asynchronously before dispatch; synchronous failures become Mono.error.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): lambda parameter 'req' is unused; the outer 'request' is
// reused — presumably the same instance. Confirm.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Creates a User under the given database, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/** Core create of a User; synchronous failures become Mono.error. */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Upserts a User under the given database, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/** Core upsert of a User; synchronous failures become Mono.error. */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
// Let the retry policy inspect/decorate the request before it is sent.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request for a User operation (create/upsert) under the
 * given database link, after validating arguments and the resource body.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    final String usersPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    // Return the built request directly rather than via a local temp.
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.User, usersPath, user, headers, options);
}
/** Replaces an existing User, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/** Core replace of a User via its self-link; synchronous failures become Mono.error. */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes a User by its link, retrying per the session-token-reset policy. */
// Consistency fix: every sibling CRUD entry point in this class carries
// @Override; this one was missing it, weakening compile-time checking
// against the client interface.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/** Core delete of a User; synchronous failures become Mono.error. */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads a User by its link, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/** Core read of a User; synchronous failures become Mono.error. */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all Users under a database as a paged feed. */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return readFeed(options, ResourceType.User, User.class,
Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
/** Query Users from a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
return queryUsers(databaseLink, new SqlQuerySpec(query), options);
}
/** Query Users with a parameterized query spec. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/** Creates a Permission under the given user, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Fix: pass the SAME policy instance to the retry driver that is handed to
    // createPermissionInternal. Previously a second, independent policy was
    // created for the driver, so the policy executing the request was not the
    // one deciding whether to retry — every sibling method shares one instance.
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/** Core create of a Permission; synchronous failures become Mono.error. */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Upserts a Permission under the given user, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
/** Core upsert of a Permission; synchronous failures become Mono.error. */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request for a Permission operation (create/upsert)
 * under the given user link, after validating arguments and the resource.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    final String permissionsPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    // Return the built request directly rather than via a local temp.
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, permissionsPath, permission, headers, options);
}
/** Replaces an existing Permission, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
/** Core replace of a Permission via its self-link; synchronous failures become Mono.error. */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes a Permission by its link, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/** Core delete of a Permission; synchronous failures become Mono.error. */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads a Permission by its link, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/** Core read of a Permission; synchronous failures become Mono.error. */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all Permissions under a user as a paged feed. */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
return readFeed(options, ResourceType.Permission, Permission.class,
Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
/** Query Permissions from a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
CosmosQueryRequestOptions options) {
return queryPermissions(userLink, new SqlQuerySpec(query), options);
}
/** Query Permissions with a parameterized query spec. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
/** Replaces an Offer (throughput resource), retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
/** Core replace of an Offer via its self-link; synchronous failures become Mono.error. */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
// Offers take no custom headers/options here.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads an Offer by its link, retrying per the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
/** Core read of an Offer; synchronous failures become Mono.error. */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
// Cast disambiguates the headers-map overload of create().
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all Offers for the account as a paged feed. */
@Override
public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
return readFeed(options, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/**
 * Generic ReadFeed pagination: builds one request per page (propagating the
 * continuation token and page size via headers) and drives each page through
 * a fresh session-token-reset retry policy via the Paginator.
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
if (options == null) {
options = new CosmosQueryRequestOptions();
}
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
// -1 means "service default page size".
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
// Effectively-final copy for capture inside the lambdas below.
final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
return request;
};
// Each page execution gets its own retry policy instance.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
this.resetSessionTokenRetryPolicy.getRequestPolicy());
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/** Query Offers from a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
return queryOffers(new SqlQuerySpec(query), options);
}
/** Query Offers with a parameterized query spec (account-level, no parent link). */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/** Fetches the account metadata, retrying per the session-token-reset policy. */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy), retryPolicy);
}
/** Returns the most recently cached account snapshot from the endpoint manager (no network call). */
@Override
public DatabaseAccount getLatestDatabaseAccount() {
return this.globalEndpointManager.getLatestDatabaseAccount();
}
/** Core account read against the service root (""); synchronous failures become Mono.error. */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
// Cast disambiguates the headers-map overload of create().
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Exposes the session container as an Object for bridge callers. */
public Object getSession() {
return this.sessionContainer;
}
/**
 * Replaces the session container; the argument must be a SessionContainer —
 * the unchecked cast below throws ClassCastException otherwise.
 */
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
/** Returns the client's collection metadata cache. */
@Override
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
/** Returns the client's partition-key-range metadata cache. */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
/**
 * Reads the account metadata from a specific regional endpoint (used by the
 * global endpoint manager). Deferred so each subscription builds a fresh
 * request; also refreshes the multi-write flag from the returned account.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeaders(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
// Force the request to the supplied regional endpoint.
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount ->
// Multi-write is effective only when enabled both on the client policy and the account.
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Selects the transport for a request: certain requests must be routed
 * through the gateway even when the client connectivity mode is direct.
 * The checks below are order-sensitive.
 *
 * @param request the request to route
 * @return the gateway proxy or the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// Explicit per-request override wins.
if (request.UseGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Offers, script CRUD (but not execution) and partition-key-range reads always go to gateway.
if (resourceType == ResourceType.Offer ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange) {
return this.gatewayProxy;
}
// Metadata-resource writes are gateway-only; data-plane writes go direct.
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Cross-partition queries (no PK, no range identity) need gateway fan-out.
if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null &&
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
/**
 * Idempotent shutdown: the closed flag is flipped atomically so the
 * teardown sequence (endpoint manager, store client factory, HTTP client,
 * CPU monitor) runs at most once.
 */
@Override
public void close() {
logger.info("Attempting to close client {}", this.clientId);
if (!closed.getAndSet(true)) {
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
LifeCycleUtils.closeQuietly(this.reactorHttpClient);
logger.info("Shutting down CpuMonitor ...");
CpuMemoryMonitor.unregister(this);
logger.info("Shutting down completed.");
} else {
logger.warn("Already shutdown!");
}
}
/** Returns the deserializer used to materialize items from service payloads. */
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
/**
 * Builds the parameterized query
 * {@code SELECT * FROM c WHERE c<pkSelector> = @pkValue}
 * used to scan a single logical partition.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    final String pkParamName = "@pkValue";
    final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    final List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    // Simple concatenation instead of a StringBuilder; same resulting text.
    final String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Resolves the collection and maps its overlapping partition key ranges
 * (over the full range) into FeedRange instances.
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
return valueHolderMono.map(RxDocumentClientImpl::toFeedRanges);
});
}
/** Converts a resolved PartitionKeyRange list into FeedRange instances. */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder) {
    final List<PartitionKeyRange> pkRanges = partitionKeyRangeListValueHolder.v;
    if (pkRanges == null) {
        throw new IllegalStateException("PartitionKeyRange list cannot be null");
    }
    final List<FeedRange> feedRanges = new ArrayList<>(pkRanges.size());
    for (PartitionKeyRange pkRange : pkRanges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
/** Wraps a single partition key range id as a FeedRange. */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangePartitionKeyRangeImpl(pkRange.getId());
}
} |
This existed in .NET because originally, we also used this for normal ReadFeed, probably not worth it here | private void moveToNextToken() {
final CompositeContinuationToken recentToken = this.compositeContinuationTokens.poll();
this.compositeContinuationTokens.add(recentToken);
if (this.compositeContinuationTokens.size() > 0) {
this.currentToken = this.compositeContinuationTokens.peek();
} else {
this.currentToken = null;
}
} | private void moveToNextToken() {
final CompositeContinuationToken recentToken = this.compositeContinuationTokens.poll();
this.compositeContinuationTokens.add(recentToken);
if (this.compositeContinuationTokens.size() > 0) {
this.currentToken = this.compositeContinuationTokens.peek();
} else {
this.currentToken = null;
}
} | class FeedRangeCompositeContinuationImpl extends FeedRangeContinuation {
private final static Logger LOGGER = LoggerFactory.getLogger(FeedRangeCompositeContinuationImpl.class);
// Per-range continuation tokens, consumed in round-robin order.
private final Queue<CompositeContinuationToken> compositeContinuationTokens;
// Token currently being drained; kept in sync with the head of the queue.
private CompositeContinuationToken currentToken;
// NOTE(review): presumably records the first range that returned no results,
// to detect a full unproductive sweep — confirm against the methods using it.
private String initialNoResultsRange;
public FeedRangeCompositeContinuationImpl(
String containerRid,
FeedRangeInternal feedRange,
List<Range<String>> ranges) {
this(containerRid, feedRange, ranges, null);
}
public FeedRangeCompositeContinuationImpl(
String containerRid,
FeedRangeInternal feedRange,
List<Range<String>> ranges,
String continuation) {
this(containerRid, feedRange);
checkNotNull(ranges, "'ranges' must not be null");
if (ranges.size() == 0) {
throw new IllegalArgumentException("'ranges' must not be empty");
}
for (Range<String> range : ranges) {
this.compositeContinuationTokens.add(
FeedRangeCompositeContinuationImpl.createCompositeContinuationTokenForRange(
range.getMin(),
range.getMax(),
continuation)
);
}
this.currentToken = this.getCompositeContinuationTokens().peek();
}
public void populatePropertyBag() {
super.populatePropertyBag();
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_VERSION,
FeedRangeContinuationVersions.V1);
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_RESOURCE_ID,
this.getContainerRid());
if (this.compositeContinuationTokens.size() > 0) {
for (CompositeContinuationToken token : this.compositeContinuationTokens) {
ModelBridgeInternal.populatePropertyBag(token);
}
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_CONTINUATION,
this.compositeContinuationTokens);
}
if (this.feedRange != null) {
this.feedRange.setProperties(this, true);
}
}
private FeedRangeCompositeContinuationImpl(String containerRid, FeedRangeInternal feedRange) {
super(containerRid, feedRange);
this.compositeContinuationTokens = new LinkedList<>();
}
public Queue<CompositeContinuationToken> getCompositeContinuationTokens() {
return compositeContinuationTokens;
}
public CompositeContinuationToken getCurrentToken() {
return this.currentToken;
}
@Override
public FeedRangeInternal getFeedRange() {
if (!(this.feedRange instanceof FeedRangeEpkImpl)) {
return this.feedRange;
}
if (this.currentToken != null) {
return new FeedRangeEpkImpl(this.currentToken.getRange());
}
return null;
}
@Override
public CompositeContinuationToken getCurrentContinuationToken() {
CompositeContinuationToken tokenSnapshot = this.currentToken;
if (tokenSnapshot == null) {
return null;
}
return tokenSnapshot;
}
@Override
public void replaceContinuation(final String continuationToken) {
final CompositeContinuationToken continuationTokenSnapshot = this.currentToken;
if (continuationTokenSnapshot == null) {
return;
}
continuationTokenSnapshot.setToken(continuationToken);
this.moveToNextToken();
}
@Override
public boolean isDone() {
return this.compositeContinuationTokens.size() == 0;
}
@Override
public void validateContainer(final String containerRid) throws IllegalArgumentException {
if (Strings.isNullOrEmpty(containerRid) || !containerRid.equals(this.getContainerRid())) {
final String message = String.format(
"The continuation was generated for container %s but current container is %s.",
this.getContainerRid(), containerRid);
throw new IllegalArgumentException(message);
}
}
@Override
public <T extends Resource> ShouldRetryResult handleChangeFeedNotModified(final FeedResponse<T> response) {
checkNotNull(response, "Argument 'response' must not be null");
if (!ModelBridgeInternal.<T>noChanges(response)) {
this.initialNoResultsRange = null;
} else if (this.compositeContinuationTokens.size() > 1) {
final String eTag = this.currentToken.getToken();
if (this.initialNoResultsRange == null) {
this.initialNoResultsRange = this.currentToken.getRange().getMin();
this.replaceContinuation(eTag);
this.moveToNextToken();
return ShouldRetryResult.RETRY_NOW;
}
if (!this.initialNoResultsRange.equalsIgnoreCase(this.currentToken.getRange().getMin())) {
this.replaceContinuation(eTag);
this.moveToNextToken();
return ShouldRetryResult.RETRY_NOW;
}
}
return ShouldRetryResult.NO_RETRY;
}
@Override
public Mono<ShouldRetryResult> handleSplit(final RxDocumentClientImpl client,
final GoneException goneException) {
checkNotNull(client, "Argument 'client' must not be null");
checkNotNull(goneException, "Argument 'goeException' must not be null");
Integer nSubStatus = goneException.getSubStatusCode();
final boolean partitionSplit =
goneException.getStatusCode() == HttpConstants.StatusCodes.GONE &&
nSubStatus != null &&
(nSubStatus == HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE
|| nSubStatus == HttpConstants.SubStatusCodes.COMPLETING_SPLIT);
if (!partitionSplit) {
return Mono.just(ShouldRetryResult.NO_RETRY);
}
final RxPartitionKeyRangeCache partitionKeyRangeCache = client.getPartitionKeyRangeCache();
final Mono<Utils.ValueHolder<List<PartitionKeyRange>>> resolvedRangesTask =
this.tryGetOverlappingRanges(
partitionKeyRangeCache,
this.currentToken.getRange().getMin(),
this.currentToken.getRange().getMax(),
true);
return resolvedRangesTask.flatMap(resolvedRanges -> {
if (resolvedRanges.v != null && resolvedRanges.v.size() > 0) {
this.createChildRanges(resolvedRanges.v);
}
return Mono.just(ShouldRetryResult.RETRY_NOW);
});
}
/**
* Used for deserializtion only
*/
public static FeedRangeCompositeContinuationImpl createFromDeserializedTokens(
String containerRid,
FeedRangeInternal feedRange,
List<CompositeContinuationToken> deserializedTokens) {
FeedRangeCompositeContinuationImpl thisPtr =
new FeedRangeCompositeContinuationImpl(containerRid, feedRange);
checkNotNull(deserializedTokens, "'deserializedTokens' must not be null");
if (deserializedTokens.size() == 0) {
throw new IllegalArgumentException("'deserializedTokens' must not be empty");
}
thisPtr.compositeContinuationTokens.addAll(deserializedTokens);
thisPtr.currentToken = thisPtr.getCompositeContinuationTokens().peek();
return thisPtr;
}
public static FeedRangeContinuation parse(final String jsonString) throws IOException {
checkNotNull(jsonString, "Argument 'jsonString' must not be null");
final ObjectMapper mapper = Utils.getSimpleObjectMapper();
return mapper.readValue(jsonString, FeedRangeContinuation.class);
}
@Override
public String toString() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (final IOException e) {
throw new IllegalArgumentException(
"Unable serialize the composite FeedRange continuation token into a JSON string",
e);
}
}
private void createChildRanges(final List<PartitionKeyRange> keyRanges) {
final PartitionKeyRange firstRange = keyRanges.get(0);
this.currentToken
.setRange(new Range<>(firstRange.getMinInclusive(),
firstRange.getMaxExclusive(), true, false));
final CompositeContinuationToken continuationAsComposite =
tryParseAsCompositeContinuationToken(
this.currentToken.getToken());
if (continuationAsComposite != null) {
continuationAsComposite.setRange(this.currentToken.getRange());
this.currentToken.setToken(continuationAsComposite.toJson());
final int size = keyRanges.size();
for (int i = 1; i < size; i++) {
final PartitionKeyRange keyRange = keyRanges.get(i);
continuationAsComposite.setRange(keyRange.toRange());
this.compositeContinuationTokens.add(createCompositeContinuationTokenForRange(
keyRange.getMinInclusive(), keyRange.getMaxExclusive(),
continuationAsComposite.toJson()));
}
} else {
final int size = keyRanges.size();
for (int i = 1; i < size; i++) {
final PartitionKeyRange keyRange = keyRanges.get(i);
this.compositeContinuationTokens.add(createCompositeContinuationTokenForRange(
keyRange.getMinInclusive(), keyRange.getMaxExclusive(),
this.currentToken.getToken()));
}
}
}
private static CompositeContinuationToken createCompositeContinuationTokenForRange(
String minInclusive,
String maxExclusive,
String token) {
return new CompositeContinuationToken(
token,
new Range<>(minInclusive, maxExclusive, true, false));
}
private Mono<Utils.ValueHolder<List<PartitionKeyRange>>> tryGetOverlappingRanges(
final RxPartitionKeyRangeCache partitionKeyRangeCache, final String min, final String max,
final Boolean forceRefresh) {
return partitionKeyRangeCache.tryGetOverlappingRangesAsync(null, this.getContainerRid(),
new Range<>(min, max, false, true), forceRefresh, null);
}
private static CompositeContinuationToken tryParseAsCompositeContinuationToken(
final String providedContinuation) {
try {
final ObjectMapper mapper = Utils.getSimpleObjectMapper();
if (providedContinuation == null) {
return null;
}
if (providedContinuation.trim().startsWith("[")) {
final List<CompositeContinuationToken> compositeContinuationTokens = Arrays
.asList(mapper.readValue(providedContinuation,
CompositeContinuationToken[].class));
if (compositeContinuationTokens.size() > 0) {
return compositeContinuationTokens.get(0);
}
return null;
} else if (providedContinuation.trim().startsWith("{")) {
return mapper.readValue(providedContinuation, CompositeContinuationToken.class);
}
return null;
} catch (final IOException ioError) {
LOGGER.debug(
"Failed to parse as composite continuation token JSON {}",
providedContinuation,
ioError);
return null;
}
}
@Override
public boolean equals(Object o) {
if (!(o instanceof FeedRangeCompositeContinuationImpl)) {
return false;
}
FeedRangeCompositeContinuationImpl other = (FeedRangeCompositeContinuationImpl)o;
return Objects.equals(this.feedRange, other.feedRange) &&
Objects.equals(this.getContainerRid(), other.getContainerRid()) &&
Objects.equals(this.initialNoResultsRange, other.initialNoResultsRange) &&
Objects.equals(this.currentToken, other.currentToken) &&
Objects.equals(this.compositeContinuationTokens, other.compositeContinuationTokens);
}
@Override
public int hashCode() {
return Objects.hash(
this.feedRange,
this.getContainerRid(),
this.initialNoResultsRange,
this.currentToken,
this.compositeContinuationTokens);
}
} | class FeedRangeCompositeContinuationImpl extends FeedRangeContinuation {
private final static Logger LOGGER = LoggerFactory.getLogger(FeedRangeCompositeContinuationImpl.class);
private final Queue<CompositeContinuationToken> compositeContinuationTokens;
private CompositeContinuationToken currentToken;
private String initialNoResultsRange;
public FeedRangeCompositeContinuationImpl(
String containerRid,
FeedRangeInternal feedRange,
List<Range<String>> ranges) {
this(containerRid, feedRange, ranges, null);
}
public FeedRangeCompositeContinuationImpl(
String containerRid,
FeedRangeInternal feedRange,
List<Range<String>> ranges,
String continuation) {
this(containerRid, feedRange);
checkNotNull(ranges, "'ranges' must not be null");
if (ranges.size() == 0) {
throw new IllegalArgumentException("'ranges' must not be empty");
}
for (Range<String> range : ranges) {
this.compositeContinuationTokens.add(
FeedRangeCompositeContinuationImpl.createCompositeContinuationTokenForRange(
range.getMin(),
range.getMax(),
continuation)
);
}
this.currentToken = this.getCompositeContinuationTokens().peek();
}
public void populatePropertyBag() {
super.populatePropertyBag();
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_VERSION,
FeedRangeContinuationVersions.V1);
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_RESOURCE_ID,
this.getContainerRid());
if (this.compositeContinuationTokens.size() > 0) {
for (CompositeContinuationToken token : this.compositeContinuationTokens) {
ModelBridgeInternal.populatePropertyBag(token);
}
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_CONTINUATION,
this.compositeContinuationTokens);
}
if (this.feedRange != null) {
this.feedRange.setProperties(this, true);
}
}
private FeedRangeCompositeContinuationImpl(String containerRid, FeedRangeInternal feedRange) {
super(containerRid, feedRange);
this.compositeContinuationTokens = new LinkedList<>();
}
public Queue<CompositeContinuationToken> getCompositeContinuationTokens() {
return compositeContinuationTokens;
}
public CompositeContinuationToken getCurrentToken() {
return this.currentToken;
}
@Override
public FeedRangeInternal getFeedRange() {
if (!(this.feedRange instanceof FeedRangeEpkImpl)) {
return this.feedRange;
}
if (this.currentToken != null) {
return new FeedRangeEpkImpl(this.currentToken.getRange());
}
return null;
}
@Override
public CompositeContinuationToken getCurrentContinuationToken() {
CompositeContinuationToken tokenSnapshot = this.currentToken;
if (tokenSnapshot == null) {
return null;
}
return tokenSnapshot;
}
@Override
public int getContinuationTokenCount() {
return this.compositeContinuationTokens.size();
}
@Override
public void replaceContinuation(final String continuationToken) {
final CompositeContinuationToken continuationTokenSnapshot = this.currentToken;
if (continuationTokenSnapshot == null) {
return;
}
continuationTokenSnapshot.setToken(continuationToken);
this.moveToNextToken();
}
@Override
public boolean isDone() {
return this.compositeContinuationTokens.size() == 0;
}
@Override
public void validateContainer(final String containerRid) throws IllegalArgumentException {
if (Strings.isNullOrEmpty(containerRid) || !containerRid.equals(this.getContainerRid())) {
final String message = String.format(
"The continuation was generated for container %s but current container is %s.",
this.getContainerRid(), containerRid);
throw new IllegalArgumentException(message);
}
}
@Override
public <T extends Resource> ShouldRetryResult handleChangeFeedNotModified(final FeedResponse<T> response) {
checkNotNull(response, "Argument 'response' must not be null");
if (!ModelBridgeInternal.<T>noChanges(response)) {
this.initialNoResultsRange = null;
} else if (this.compositeContinuationTokens.size() > 1) {
final String eTag = this.currentToken.getToken();
if (this.initialNoResultsRange == null) {
this.initialNoResultsRange = this.currentToken.getRange().getMin();
this.replaceContinuation(eTag);
this.moveToNextToken();
return ShouldRetryResult.RETRY_NOW;
}
if (!this.initialNoResultsRange.equalsIgnoreCase(this.currentToken.getRange().getMin())) {
this.replaceContinuation(eTag);
this.moveToNextToken();
return ShouldRetryResult.RETRY_NOW;
}
}
return ShouldRetryResult.NO_RETRY;
}
@Override
public Mono<ShouldRetryResult> handleSplit(final RxDocumentClientImpl client,
final GoneException goneException) {
checkNotNull(client, "Argument 'client' must not be null");
checkNotNull(goneException, "Argument 'goeException' must not be null");
Integer nSubStatus = goneException.getSubStatusCode();
final boolean partitionSplit =
goneException.getStatusCode() == HttpConstants.StatusCodes.GONE &&
nSubStatus != null &&
(nSubStatus == HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE
|| nSubStatus == HttpConstants.SubStatusCodes.COMPLETING_SPLIT);
if (!partitionSplit) {
return Mono.just(ShouldRetryResult.NO_RETRY);
}
final RxPartitionKeyRangeCache partitionKeyRangeCache = client.getPartitionKeyRangeCache();
final Mono<Utils.ValueHolder<List<PartitionKeyRange>>> resolvedRangesTask =
this.tryGetOverlappingRanges(
partitionKeyRangeCache,
this.currentToken.getRange().getMin(),
this.currentToken.getRange().getMax(),
true);
return resolvedRangesTask.flatMap(resolvedRanges -> {
if (resolvedRanges.v != null && resolvedRanges.v.size() > 0) {
this.createChildRanges(resolvedRanges.v);
}
return Mono.just(ShouldRetryResult.RETRY_NOW);
});
}
/**
* Used for deserializtion only
*/
public static FeedRangeCompositeContinuationImpl createFromDeserializedTokens(
String containerRid,
FeedRangeInternal feedRange,
List<CompositeContinuationToken> deserializedTokens) {
FeedRangeCompositeContinuationImpl thisPtr =
new FeedRangeCompositeContinuationImpl(containerRid, feedRange);
checkNotNull(deserializedTokens, "'deserializedTokens' must not be null");
if (deserializedTokens.size() == 0) {
throw new IllegalArgumentException("'deserializedTokens' must not be empty");
}
thisPtr.compositeContinuationTokens.addAll(deserializedTokens);
thisPtr.currentToken = thisPtr.getCompositeContinuationTokens().peek();
return thisPtr;
}
public static FeedRangeContinuation parse(final String jsonString) throws IOException {
checkNotNull(jsonString, "Argument 'jsonString' must not be null");
final ObjectMapper mapper = Utils.getSimpleObjectMapper();
return mapper.readValue(jsonString, FeedRangeContinuation.class);
}
@Override
public String toString() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (final IOException e) {
throw new IllegalArgumentException(
"Unable serialize the composite FeedRange continuation token into a JSON string",
e);
}
}
private void createChildRanges(final List<PartitionKeyRange> keyRanges) {
final PartitionKeyRange firstRange = keyRanges.get(0);
this.currentToken
.setRange(new Range<>(firstRange.getMinInclusive(),
firstRange.getMaxExclusive(), true, false));
final CompositeContinuationToken continuationAsComposite =
tryParseAsCompositeContinuationToken(
this.currentToken.getToken());
if (continuationAsComposite != null) {
continuationAsComposite.setRange(this.currentToken.getRange());
this.currentToken.setToken(continuationAsComposite.toJson());
final int size = keyRanges.size();
for (int i = 1; i < size; i++) {
final PartitionKeyRange keyRange = keyRanges.get(i);
continuationAsComposite.setRange(keyRange.toRange());
this.compositeContinuationTokens.add(createCompositeContinuationTokenForRange(
keyRange.getMinInclusive(), keyRange.getMaxExclusive(),
continuationAsComposite.toJson()));
}
} else {
final int size = keyRanges.size();
for (int i = 1; i < size; i++) {
final PartitionKeyRange keyRange = keyRanges.get(i);
this.compositeContinuationTokens.add(createCompositeContinuationTokenForRange(
keyRange.getMinInclusive(), keyRange.getMaxExclusive(),
this.currentToken.getToken()));
}
}
}
private static CompositeContinuationToken createCompositeContinuationTokenForRange(
String minInclusive,
String maxExclusive,
String token) {
return new CompositeContinuationToken(
token,
new Range<>(minInclusive, maxExclusive, true, false));
}
private Mono<Utils.ValueHolder<List<PartitionKeyRange>>> tryGetOverlappingRanges(
final RxPartitionKeyRangeCache partitionKeyRangeCache, final String min, final String max,
final Boolean forceRefresh) {
return partitionKeyRangeCache.tryGetOverlappingRangesAsync(null, this.getContainerRid(),
new Range<>(min, max, false, true), forceRefresh, null);
}
private static CompositeContinuationToken tryParseAsCompositeContinuationToken(
final String providedContinuation) {
try {
final ObjectMapper mapper = Utils.getSimpleObjectMapper();
if (providedContinuation == null) {
return null;
}
if (providedContinuation.trim().startsWith("[")) {
final List<CompositeContinuationToken> compositeContinuationTokens = Arrays
.asList(mapper.readValue(providedContinuation,
CompositeContinuationToken[].class));
if (compositeContinuationTokens.size() > 0) {
return compositeContinuationTokens.get(0);
}
return null;
} else if (providedContinuation.trim().startsWith("{")) {
return mapper.readValue(providedContinuation, CompositeContinuationToken.class);
}
return null;
} catch (final IOException ioError) {
LOGGER.debug(
"Failed to parse as composite continuation token JSON {}",
providedContinuation,
ioError);
return null;
}
}
@Override
public boolean equals(Object o) {
if (!(o instanceof FeedRangeCompositeContinuationImpl)) {
return false;
}
FeedRangeCompositeContinuationImpl other = (FeedRangeCompositeContinuationImpl)o;
return Objects.equals(this.feedRange, other.feedRange) &&
Objects.equals(this.getContainerRid(), other.getContainerRid()) &&
Objects.equals(this.initialNoResultsRange, other.initialNoResultsRange) &&
Objects.equals(this.currentToken, other.currentToken) &&
Objects.equals(this.compositeContinuationTokens, other.compositeContinuationTokens);
}
@Override
public int hashCode() {
return Objects.hash(
this.feedRange,
this.getContainerRid(),
this.initialNoResultsRange,
this.currentToken,
this.compositeContinuationTokens);
}
} | |
Fixed in next iteration (remove the commented out if statement | private void moveToNextToken() {
final CompositeContinuationToken recentToken = this.compositeContinuationTokens.poll();
this.compositeContinuationTokens.add(recentToken);
if (this.compositeContinuationTokens.size() > 0) {
this.currentToken = this.compositeContinuationTokens.peek();
} else {
this.currentToken = null;
}
} | private void moveToNextToken() {
final CompositeContinuationToken recentToken = this.compositeContinuationTokens.poll();
this.compositeContinuationTokens.add(recentToken);
if (this.compositeContinuationTokens.size() > 0) {
this.currentToken = this.compositeContinuationTokens.peek();
} else {
this.currentToken = null;
}
} | class FeedRangeCompositeContinuationImpl extends FeedRangeContinuation {
private final static Logger LOGGER = LoggerFactory.getLogger(FeedRangeCompositeContinuationImpl.class);
private final Queue<CompositeContinuationToken> compositeContinuationTokens;
private CompositeContinuationToken currentToken;
private String initialNoResultsRange;
public FeedRangeCompositeContinuationImpl(
String containerRid,
FeedRangeInternal feedRange,
List<Range<String>> ranges) {
this(containerRid, feedRange, ranges, null);
}
public FeedRangeCompositeContinuationImpl(
String containerRid,
FeedRangeInternal feedRange,
List<Range<String>> ranges,
String continuation) {
this(containerRid, feedRange);
checkNotNull(ranges, "'ranges' must not be null");
if (ranges.size() == 0) {
throw new IllegalArgumentException("'ranges' must not be empty");
}
for (Range<String> range : ranges) {
this.compositeContinuationTokens.add(
FeedRangeCompositeContinuationImpl.createCompositeContinuationTokenForRange(
range.getMin(),
range.getMax(),
continuation)
);
}
this.currentToken = this.getCompositeContinuationTokens().peek();
}
public void populatePropertyBag() {
super.populatePropertyBag();
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_VERSION,
FeedRangeContinuationVersions.V1);
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_RESOURCE_ID,
this.getContainerRid());
if (this.compositeContinuationTokens.size() > 0) {
for (CompositeContinuationToken token : this.compositeContinuationTokens) {
ModelBridgeInternal.populatePropertyBag(token);
}
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_CONTINUATION,
this.compositeContinuationTokens);
}
if (this.feedRange != null) {
this.feedRange.setProperties(this, true);
}
}
private FeedRangeCompositeContinuationImpl(String containerRid, FeedRangeInternal feedRange) {
super(containerRid, feedRange);
this.compositeContinuationTokens = new LinkedList<>();
}
public Queue<CompositeContinuationToken> getCompositeContinuationTokens() {
return compositeContinuationTokens;
}
public CompositeContinuationToken getCurrentToken() {
return this.currentToken;
}
@Override
public FeedRangeInternal getFeedRange() {
if (!(this.feedRange instanceof FeedRangeEpkImpl)) {
return this.feedRange;
}
if (this.currentToken != null) {
return new FeedRangeEpkImpl(this.currentToken.getRange());
}
return null;
}
@Override
public CompositeContinuationToken getCurrentContinuationToken() {
CompositeContinuationToken tokenSnapshot = this.currentToken;
if (tokenSnapshot == null) {
return null;
}
return tokenSnapshot;
}
@Override
public void replaceContinuation(final String continuationToken) {
final CompositeContinuationToken continuationTokenSnapshot = this.currentToken;
if (continuationTokenSnapshot == null) {
return;
}
continuationTokenSnapshot.setToken(continuationToken);
this.moveToNextToken();
}
@Override
public boolean isDone() {
return this.compositeContinuationTokens.size() == 0;
}
@Override
public void validateContainer(final String containerRid) throws IllegalArgumentException {
if (Strings.isNullOrEmpty(containerRid) || !containerRid.equals(this.getContainerRid())) {
final String message = String.format(
"The continuation was generated for container %s but current container is %s.",
this.getContainerRid(), containerRid);
throw new IllegalArgumentException(message);
}
}
@Override
public <T extends Resource> ShouldRetryResult handleChangeFeedNotModified(final FeedResponse<T> response) {
checkNotNull(response, "Argument 'response' must not be null");
if (!ModelBridgeInternal.<T>noChanges(response)) {
this.initialNoResultsRange = null;
} else if (this.compositeContinuationTokens.size() > 1) {
final String eTag = this.currentToken.getToken();
if (this.initialNoResultsRange == null) {
this.initialNoResultsRange = this.currentToken.getRange().getMin();
this.replaceContinuation(eTag);
this.moveToNextToken();
return ShouldRetryResult.RETRY_NOW;
}
if (!this.initialNoResultsRange.equalsIgnoreCase(this.currentToken.getRange().getMin())) {
this.replaceContinuation(eTag);
this.moveToNextToken();
return ShouldRetryResult.RETRY_NOW;
}
}
return ShouldRetryResult.NO_RETRY;
}
@Override
public Mono<ShouldRetryResult> handleSplit(final RxDocumentClientImpl client,
final GoneException goneException) {
checkNotNull(client, "Argument 'client' must not be null");
checkNotNull(goneException, "Argument 'goeException' must not be null");
Integer nSubStatus = goneException.getSubStatusCode();
final boolean partitionSplit =
goneException.getStatusCode() == HttpConstants.StatusCodes.GONE &&
nSubStatus != null &&
(nSubStatus == HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE
|| nSubStatus == HttpConstants.SubStatusCodes.COMPLETING_SPLIT);
if (!partitionSplit) {
return Mono.just(ShouldRetryResult.NO_RETRY);
}
final RxPartitionKeyRangeCache partitionKeyRangeCache = client.getPartitionKeyRangeCache();
final Mono<Utils.ValueHolder<List<PartitionKeyRange>>> resolvedRangesTask =
this.tryGetOverlappingRanges(
partitionKeyRangeCache,
this.currentToken.getRange().getMin(),
this.currentToken.getRange().getMax(),
true);
return resolvedRangesTask.flatMap(resolvedRanges -> {
if (resolvedRanges.v != null && resolvedRanges.v.size() > 0) {
this.createChildRanges(resolvedRanges.v);
}
return Mono.just(ShouldRetryResult.RETRY_NOW);
});
}
/**
* Used for deserializtion only
*/
public static FeedRangeCompositeContinuationImpl createFromDeserializedTokens(
String containerRid,
FeedRangeInternal feedRange,
List<CompositeContinuationToken> deserializedTokens) {
FeedRangeCompositeContinuationImpl thisPtr =
new FeedRangeCompositeContinuationImpl(containerRid, feedRange);
checkNotNull(deserializedTokens, "'deserializedTokens' must not be null");
if (deserializedTokens.size() == 0) {
throw new IllegalArgumentException("'deserializedTokens' must not be empty");
}
thisPtr.compositeContinuationTokens.addAll(deserializedTokens);
thisPtr.currentToken = thisPtr.getCompositeContinuationTokens().peek();
return thisPtr;
}
public static FeedRangeContinuation parse(final String jsonString) throws IOException {
checkNotNull(jsonString, "Argument 'jsonString' must not be null");
final ObjectMapper mapper = Utils.getSimpleObjectMapper();
return mapper.readValue(jsonString, FeedRangeContinuation.class);
}
@Override
public String toString() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (final IOException e) {
throw new IllegalArgumentException(
"Unable serialize the composite FeedRange continuation token into a JSON string",
e);
}
}
private void createChildRanges(final List<PartitionKeyRange> keyRanges) {
final PartitionKeyRange firstRange = keyRanges.get(0);
this.currentToken
.setRange(new Range<>(firstRange.getMinInclusive(),
firstRange.getMaxExclusive(), true, false));
final CompositeContinuationToken continuationAsComposite =
tryParseAsCompositeContinuationToken(
this.currentToken.getToken());
if (continuationAsComposite != null) {
continuationAsComposite.setRange(this.currentToken.getRange());
this.currentToken.setToken(continuationAsComposite.toJson());
final int size = keyRanges.size();
for (int i = 1; i < size; i++) {
final PartitionKeyRange keyRange = keyRanges.get(i);
continuationAsComposite.setRange(keyRange.toRange());
this.compositeContinuationTokens.add(createCompositeContinuationTokenForRange(
keyRange.getMinInclusive(), keyRange.getMaxExclusive(),
continuationAsComposite.toJson()));
}
} else {
final int size = keyRanges.size();
for (int i = 1; i < size; i++) {
final PartitionKeyRange keyRange = keyRanges.get(i);
this.compositeContinuationTokens.add(createCompositeContinuationTokenForRange(
keyRange.getMinInclusive(), keyRange.getMaxExclusive(),
this.currentToken.getToken()));
}
}
}
private static CompositeContinuationToken createCompositeContinuationTokenForRange(
String minInclusive,
String maxExclusive,
String token) {
return new CompositeContinuationToken(
token,
new Range<>(minInclusive, maxExclusive, true, false));
}
private Mono<Utils.ValueHolder<List<PartitionKeyRange>>> tryGetOverlappingRanges(
final RxPartitionKeyRangeCache partitionKeyRangeCache, final String min, final String max,
final Boolean forceRefresh) {
return partitionKeyRangeCache.tryGetOverlappingRangesAsync(null, this.getContainerRid(),
new Range<>(min, max, false, true), forceRefresh, null);
}
private static CompositeContinuationToken tryParseAsCompositeContinuationToken(
final String providedContinuation) {
try {
final ObjectMapper mapper = Utils.getSimpleObjectMapper();
if (providedContinuation == null) {
return null;
}
if (providedContinuation.trim().startsWith("[")) {
final List<CompositeContinuationToken> compositeContinuationTokens = Arrays
.asList(mapper.readValue(providedContinuation,
CompositeContinuationToken[].class));
if (compositeContinuationTokens.size() > 0) {
return compositeContinuationTokens.get(0);
}
return null;
} else if (providedContinuation.trim().startsWith("{")) {
return mapper.readValue(providedContinuation, CompositeContinuationToken.class);
}
return null;
} catch (final IOException ioError) {
LOGGER.debug(
"Failed to parse as composite continuation token JSON {}",
providedContinuation,
ioError);
return null;
}
}
@Override
public boolean equals(Object o) {
if (!(o instanceof FeedRangeCompositeContinuationImpl)) {
return false;
}
FeedRangeCompositeContinuationImpl other = (FeedRangeCompositeContinuationImpl)o;
return Objects.equals(this.feedRange, other.feedRange) &&
Objects.equals(this.getContainerRid(), other.getContainerRid()) &&
Objects.equals(this.initialNoResultsRange, other.initialNoResultsRange) &&
Objects.equals(this.currentToken, other.currentToken) &&
Objects.equals(this.compositeContinuationTokens, other.compositeContinuationTokens);
}
@Override
public int hashCode() {
return Objects.hash(
this.feedRange,
this.getContainerRid(),
this.initialNoResultsRange,
this.currentToken,
this.compositeContinuationTokens);
}
} | class FeedRangeCompositeContinuationImpl extends FeedRangeContinuation {
private final static Logger LOGGER = LoggerFactory.getLogger(FeedRangeCompositeContinuationImpl.class);
private final Queue<CompositeContinuationToken> compositeContinuationTokens;
private CompositeContinuationToken currentToken;
private String initialNoResultsRange;
public FeedRangeCompositeContinuationImpl(
String containerRid,
FeedRangeInternal feedRange,
List<Range<String>> ranges) {
this(containerRid, feedRange, ranges, null);
}
public FeedRangeCompositeContinuationImpl(
String containerRid,
FeedRangeInternal feedRange,
List<Range<String>> ranges,
String continuation) {
this(containerRid, feedRange);
checkNotNull(ranges, "'ranges' must not be null");
if (ranges.size() == 0) {
throw new IllegalArgumentException("'ranges' must not be empty");
}
for (Range<String> range : ranges) {
this.compositeContinuationTokens.add(
FeedRangeCompositeContinuationImpl.createCompositeContinuationTokenForRange(
range.getMin(),
range.getMax(),
continuation)
);
}
this.currentToken = this.getCompositeContinuationTokens().peek();
}
public void populatePropertyBag() {
super.populatePropertyBag();
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_VERSION,
FeedRangeContinuationVersions.V1);
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_RESOURCE_ID,
this.getContainerRid());
if (this.compositeContinuationTokens.size() > 0) {
for (CompositeContinuationToken token : this.compositeContinuationTokens) {
ModelBridgeInternal.populatePropertyBag(token);
}
setProperty(
this,
Constants.Properties.FEED_RANGE_COMPOSITE_CONTINUATION_CONTINUATION,
this.compositeContinuationTokens);
}
if (this.feedRange != null) {
this.feedRange.setProperties(this, true);
}
}
// Base constructor shared by all creation paths; starts with an empty token queue.
private FeedRangeCompositeContinuationImpl(String containerRid, FeedRangeInternal feedRange) {
    super(containerRid, feedRange);
    this.compositeContinuationTokens = new LinkedList<>();
}

// Returns the live (mutable) queue of per-range continuation tokens.
public Queue<CompositeContinuationToken> getCompositeContinuationTokens() {
    return compositeContinuationTokens;
}

// Returns the token currently being drained; may be null.
public CompositeContinuationToken getCurrentToken() {
    return this.currentToken;
}
@Override
public FeedRangeInternal getFeedRange() {
    // Non-EPK feed ranges are returned unchanged.
    if (!(this.feedRange instanceof FeedRangeEpkImpl)) {
        return this.feedRange;
    }

    // For EPK ranges, narrow to the sub-range of the token being processed.
    if (this.currentToken != null) {
        return new FeedRangeEpkImpl(this.currentToken.getRange());
    }

    return null;
}
/**
 * Returns the token currently being drained, or null when there is none.
 */
@Override
public CompositeContinuationToken getCurrentContinuationToken() {
    // Simplified: the previous snapshot-then-null-check returned the unchanged
    // snapshot in both branches, which is equivalent to a single field read.
    return this.currentToken;
}
@Override
public int getContinuationTokenCount() {
    // Number of per-range tokens still tracked by this continuation.
    return this.compositeContinuationTokens.size();
}
@Override
public void replaceContinuation(final String continuationToken) {
    // Snapshot first so an exhausted continuation (null current token) is a no-op.
    final CompositeContinuationToken continuationTokenSnapshot = this.currentToken;

    if (continuationTokenSnapshot == null) {
        return;
    }

    // Record the new continuation for the current range, then advance to the next range.
    continuationTokenSnapshot.setToken(continuationToken);
    this.moveToNextToken();
}
/**
 * Returns true once every per-range continuation token has been drained.
 */
@Override
public boolean isDone() {
    // Idiomatic emptiness check instead of size() == 0.
    return this.compositeContinuationTokens.isEmpty();
}
// A continuation is only valid against the container it was generated for;
// any mismatch (or missing rid) is rejected.
@Override
public void validateContainer(final String containerRid) throws IllegalArgumentException {
    if (Strings.isNullOrEmpty(containerRid) || !containerRid.equals(this.getContainerRid())) {
        final String message = String.format(
            "The continuation was generated for container %s but current container is %s.",
            this.getContainerRid(), containerRid);
        throw new IllegalArgumentException(message);
    }
}
/**
 * Decides whether a "no changes" change feed response should trigger an immediate
 * retry on the next sub-range. Retrying stops once a full sweep over all sub-ranges
 * (tracked via initialNoResultsRange) produced no results.
 */
@Override
public <T extends Resource> ShouldRetryResult handleChangeFeedNotModified(final FeedResponse<T> response) {
    checkNotNull(response, "Argument 'response' must not be null");

    if (!ModelBridgeInternal.<T>noChanges(response)) {
        // Results arrived - reset the empty-sweep marker.
        this.initialNoResultsRange = null;
    } else if (this.compositeContinuationTokens.size() > 1) {
        final String eTag = this.currentToken.getToken();

        if (this.initialNoResultsRange == null) {
            // First empty range in this sweep - remember where the sweep started.
            this.initialNoResultsRange = this.currentToken.getRange().getMin();
            // NOTE(review): replaceContinuation already advances to the next token,
            // so the following moveToNextToken advances twice - confirm intended.
            this.replaceContinuation(eTag);

            this.moveToNextToken();
            return ShouldRetryResult.RETRY_NOW;
        }

        // Keep sweeping until we are back at the range where the empty sweep began.
        if (!this.initialNoResultsRange.equalsIgnoreCase(this.currentToken.getRange().getMin())) {
            this.replaceContinuation(eTag);

            this.moveToNextToken();
            return ShouldRetryResult.RETRY_NOW;
        }
    }

    return ShouldRetryResult.NO_RETRY;
}
/**
 * Handles a 410/GONE failure caused by a partition split: force-refreshes the
 * overlapping partition key ranges and fans the current token out into child-range
 * tokens, then requests an immediate retry. Non-split failures are not retried.
 *
 * @param client the document client providing the partition key range cache.
 * @param goneException the failure to inspect; must not be null.
 * @return RETRY_NOW when a split was handled, NO_RETRY otherwise.
 */
@Override
public Mono<ShouldRetryResult> handleSplit(final RxDocumentClientImpl client,
    final GoneException goneException) {

    checkNotNull(client, "Argument 'client' must not be null");
    // Fixed argument-name typo in the validation message ('goeException' -> 'goneException').
    checkNotNull(goneException, "Argument 'goneException' must not be null");

    // Only GONE with a split-related sub-status is treated as a partition split.
    Integer nSubStatus = goneException.getSubStatusCode();
    final boolean partitionSplit =
        goneException.getStatusCode() == HttpConstants.StatusCodes.GONE &&
        nSubStatus != null &&
        (nSubStatus == HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE
            || nSubStatus == HttpConstants.SubStatusCodes.COMPLETING_SPLIT);

    if (!partitionSplit) {
        return Mono.just(ShouldRetryResult.NO_RETRY);
    }

    final RxPartitionKeyRangeCache partitionKeyRangeCache = client.getPartitionKeyRangeCache();
    // Force a cache refresh so the post-split child ranges are observed.
    final Mono<Utils.ValueHolder<List<PartitionKeyRange>>> resolvedRangesTask =
        this.tryGetOverlappingRanges(
            partitionKeyRangeCache,
            this.currentToken.getRange().getMin(),
            this.currentToken.getRange().getMax(),
            true);

    return resolvedRangesTask.flatMap(resolvedRanges -> {
        if (resolvedRanges.v != null && resolvedRanges.v.size() > 0) {
            this.createChildRanges(resolvedRanges.v);
        }

        return Mono.just(ShouldRetryResult.RETRY_NOW);
    });
}
/**
 * Used for deserialization only: rehydrates a composite continuation from
 * already-deserialized per-range tokens, selecting the head token as current.
 *
 * @param containerRid resource id of the owning container.
 * @param feedRange the feed range the continuation covers.
 * @param deserializedTokens the tokens to adopt; must be non-null and non-empty.
 * @return the rehydrated continuation.
 */
public static FeedRangeCompositeContinuationImpl createFromDeserializedTokens(
    String containerRid,
    FeedRangeInternal feedRange,
    List<CompositeContinuationToken> deserializedTokens) {

    // Validate inputs before allocating the instance.
    checkNotNull(deserializedTokens, "'deserializedTokens' must not be null");
    if (deserializedTokens.isEmpty()) {
        throw new IllegalArgumentException("'deserializedTokens' must not be empty");
    }

    FeedRangeCompositeContinuationImpl thisPtr =
        new FeedRangeCompositeContinuationImpl(containerRid, feedRange);

    thisPtr.compositeContinuationTokens.addAll(deserializedTokens);
    thisPtr.currentToken = thisPtr.getCompositeContinuationTokens().peek();

    return thisPtr;
}
// Deserializes the given JSON string into a FeedRangeContinuation using the
// shared simple object mapper.
public static FeedRangeContinuation parse(final String jsonString) throws IOException {
    checkNotNull(jsonString, "Argument 'jsonString' must not be null");
    final ObjectMapper mapper = Utils.getSimpleObjectMapper();
    return mapper.readValue(jsonString, FeedRangeContinuation.class);
}
/**
 * Serializes this continuation to its JSON representation.
 *
 * @throws IllegalArgumentException wrapping the underlying IOException when
 * serialization fails (exception type kept for compatibility with callers).
 */
@Override
public String toString() {
    try {
        return Utils.getSimpleObjectMapper().writeValueAsString(this);
    } catch (final IOException e) {
        // Fixed message grammar: "Unable serialize" -> "Unable to serialize".
        throw new IllegalArgumentException(
            "Unable to serialize the composite FeedRange continuation token into a JSON string",
            e);
    }
}
// After a split, rewrites the current token to cover only the first child range and
// appends new tokens for the remaining children so every child keeps being drained.
private void createChildRanges(final List<PartitionKeyRange> keyRanges) {
    final PartitionKeyRange firstRange = keyRanges.get(0);

    // Narrow the current token to the first child range in place.
    this.currentToken
        .setRange(new Range<>(firstRange.getMinInclusive(),
            firstRange.getMaxExclusive(), true, false));

    final CompositeContinuationToken continuationAsComposite =
        tryParseAsCompositeContinuationToken(
            this.currentToken.getToken());

    if (continuationAsComposite != null) {
        // The stored token is itself a composite token: rewrite its embedded range
        // for each child and persist it back as JSON.
        continuationAsComposite.setRange(this.currentToken.getRange());
        this.currentToken.setToken(continuationAsComposite.toJson());

        final int size = keyRanges.size();
        for (int i = 1; i < size; i++) {
            final PartitionKeyRange keyRange = keyRanges.get(i);

            continuationAsComposite.setRange(keyRange.toRange());
            this.compositeContinuationTokens.add(createCompositeContinuationTokenForRange(
                keyRange.getMinInclusive(), keyRange.getMaxExclusive(),
                continuationAsComposite.toJson()));
        }
    } else {
        // Plain (non-composite) token: reuse its value verbatim for every child range.
        final int size = keyRanges.size();
        for (int i = 1; i < size; i++) {
            final PartitionKeyRange keyRange = keyRanges.get(i);

            this.compositeContinuationTokens.add(createCompositeContinuationTokenForRange(
                keyRange.getMinInclusive(), keyRange.getMaxExclusive(),
                this.currentToken.getToken()));
        }
    }
}
// Builds a token for the half-open range [minInclusive, maxExclusive) carrying
// the given continuation value (may be null).
private static CompositeContinuationToken createCompositeContinuationTokenForRange(
    String minInclusive,
    String maxExclusive,
    String token) {

    return new CompositeContinuationToken(
        token,
        new Range<>(minInclusive, maxExclusive, true, false));
}
// Resolves the partition key ranges overlapping the given min/max from the cache.
// NOTE(review): the lookup range is built with (min exclusive, max inclusive) while
// tokens use [min, max) - confirm this asymmetry matches the cache contract.
private Mono<Utils.ValueHolder<List<PartitionKeyRange>>> tryGetOverlappingRanges(
    final RxPartitionKeyRangeCache partitionKeyRangeCache, final String min, final String max,
    final Boolean forceRefresh) {

    return partitionKeyRangeCache.tryGetOverlappingRangesAsync(null, this.getContainerRid(),
        new Range<>(min, max, false, true), forceRefresh, null);
}
// Best-effort parse of a continuation string as a CompositeContinuationToken.
// Accepts a JSON array (first element wins) or a single JSON object; returns
// null for anything else, including malformed JSON (logged at debug level).
private static CompositeContinuationToken tryParseAsCompositeContinuationToken(
    final String providedContinuation) {

    try {
        final ObjectMapper mapper = Utils.getSimpleObjectMapper();

        if (providedContinuation == null) {
            return null;
        }

        if (providedContinuation.trim().startsWith("[")) {
            final List<CompositeContinuationToken> compositeContinuationTokens = Arrays
                .asList(mapper.readValue(providedContinuation,
                    CompositeContinuationToken[].class));

            if (compositeContinuationTokens.size() > 0) {
                return compositeContinuationTokens.get(0);
            }

            return null;
        } else if (providedContinuation.trim().startsWith("{")) {
            return mapper.readValue(providedContinuation, CompositeContinuationToken.class);
        }

        return null;
    } catch (final IOException ioError) {
        LOGGER.debug(
            "Failed to parse as composite continuation token JSON {}",
            providedContinuation,
            ioError);
        return null;
    }
}
/**
 * Value equality over feed range, container rid, sweep marker, current token and
 * the full token queue; consistent with hashCode.
 */
@Override
public boolean equals(Object o) {
    // Identity fast path avoids comparing every component against itself.
    if (this == o) {
        return true;
    }

    if (!(o instanceof FeedRangeCompositeContinuationImpl)) {
        return false;
    }

    FeedRangeCompositeContinuationImpl other = (FeedRangeCompositeContinuationImpl)o;

    return Objects.equals(this.feedRange, other.feedRange) &&
        Objects.equals(this.getContainerRid(), other.getContainerRid()) &&
        Objects.equals(this.initialNoResultsRange, other.initialNoResultsRange) &&
        Objects.equals(this.currentToken, other.currentToken) &&
        Objects.equals(this.compositeContinuationTokens, other.compositeContinuationTokens);
}
// Hash over the same components compared in equals().
@Override
public int hashCode() {
    return Objects.hash(
        this.feedRange,
        this.getContainerRid(),
        this.initialNoResultsRange,
        this.currentToken,
        this.compositeContinuationTokens);
}
} | |
nit: java code style. | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
// Only GONE failures (partition splits) are candidates for retry here.
if (!(e instanceof GoneException)) {
    return Mono.just(ShouldRetryResult.noRetry());
}

// Style fix: opening brace moved onto the condition line (K&R), matching the file.
if (this.state.getContinuation() == null) {
    // No continuation yet - derive the effective range for the feed range and
    // seed a fresh continuation before delegating split handling to it.
    final FeedRangeInternal feedRange = this.state.getFeedRange();
    final Mono<Range<String>> effectiveRangeMono = feedRange.getEffectiveRange(
        this.client.getPartitionKeyRangeCache(),
        null,
        this.client.getCollectionCache().resolveByRidAsync(
            null,
            this.state.getContainerRid(),
            null)
    );

    return effectiveRangeMono
        .map(effectiveRange -> {
            return this.state.setContinuation(
                FeedRangeContinuation.create(
                    this.state.getContainerRid(),
                    this.state.getFeedRange(),
                    effectiveRange));
        })
        .flatMap(state -> state.getContinuation().handleSplit(client, (GoneException)e));
}

return this.state.getContinuation().handleSplit(client, (GoneException)e);
} | { | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
// Only GONE failures (partition splits) are candidates for retry here.
if (!(e instanceof GoneException)) {
    return Mono.just(ShouldRetryResult.noRetry());
}

if (this.state.getContinuation() == null) {
    // No continuation yet - derive the effective range and seed a fresh
    // continuation before delegating split handling to it.
    final FeedRangeInternal feedRange = this.state.getFeedRange();
    final Mono<Range<String>> effectiveRangeMono = feedRange.getEffectiveRange(
        this.client.getPartitionKeyRangeCache(),
        null,
        this.client.getCollectionCache().resolveByRidAsync(
            null,
            this.state.getContainerRid(),
            null)
    );

    return effectiveRangeMono
        .map(effectiveRange -> {
            return this.state.setContinuation(
                FeedRangeContinuation.create(
                    this.state.getContainerRid(),
                    this.state.getFeedRange(),
                    effectiveRange));
        })
        .flatMap(state -> state.getContinuation().handleSplit(client, (GoneException)e));
}

return this.state.getContinuation().handleSplit(client, (GoneException)e);
} | class FeedRangeContinuationSplitRetryPolicy extends RetryPolicyWithDiagnostics {
// Change feed state whose continuation performs the actual split handling.
private final ChangeFeedState state;
// Client used to resolve partition key ranges and the collection cache.
private final RxDocumentClientImpl client;

public FeedRangeContinuationSplitRetryPolicy(
    RxDocumentClientImpl client,
    ChangeFeedState state) {

    this.client = client;
    this.state = state;
}

// NOTE(review): this @Override belongs to the shouldRetry method omitted from this excerpt.
@Override
} | class FeedRangeContinuationSplitRetryPolicy extends RetryPolicyWithDiagnostics {
// Change feed state whose continuation performs the actual split handling.
private final ChangeFeedState state;
// Client used to resolve partition key ranges and the collection cache.
private final RxDocumentClientImpl client;

public FeedRangeContinuationSplitRetryPolicy(
    RxDocumentClientImpl client,
    ChangeFeedState state) {

    this.client = client;
    this.state = state;
}

// NOTE(review): this @Override belongs to the shouldRetry method omitted from this excerpt.
@Override
} |
Fixed | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
// Only GONE failures (partition splits) are candidates for retry here.
if (!(e instanceof GoneException)) {
    return Mono.just(ShouldRetryResult.noRetry());
}

// Style fix: opening brace moved onto the condition line (K&R), matching the file.
if (this.state.getContinuation() == null) {
    // No continuation yet - derive the effective range for the feed range and
    // seed a fresh continuation before delegating split handling to it.
    final FeedRangeInternal feedRange = this.state.getFeedRange();
    final Mono<Range<String>> effectiveRangeMono = feedRange.getEffectiveRange(
        this.client.getPartitionKeyRangeCache(),
        null,
        this.client.getCollectionCache().resolveByRidAsync(
            null,
            this.state.getContainerRid(),
            null)
    );

    return effectiveRangeMono
        .map(effectiveRange -> {
            return this.state.setContinuation(
                FeedRangeContinuation.create(
                    this.state.getContainerRid(),
                    this.state.getFeedRange(),
                    effectiveRange));
        })
        .flatMap(state -> state.getContinuation().handleSplit(client, (GoneException)e));
}

return this.state.getContinuation().handleSplit(client, (GoneException)e);
} | { | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
// Only GONE failures (partition splits) are candidates for retry here.
if (!(e instanceof GoneException)) {
    return Mono.just(ShouldRetryResult.noRetry());
}

if (this.state.getContinuation() == null) {
    // No continuation yet - derive the effective range and seed a fresh
    // continuation before delegating split handling to it.
    final FeedRangeInternal feedRange = this.state.getFeedRange();
    final Mono<Range<String>> effectiveRangeMono = feedRange.getEffectiveRange(
        this.client.getPartitionKeyRangeCache(),
        null,
        this.client.getCollectionCache().resolveByRidAsync(
            null,
            this.state.getContainerRid(),
            null)
    );

    return effectiveRangeMono
        .map(effectiveRange -> {
            return this.state.setContinuation(
                FeedRangeContinuation.create(
                    this.state.getContainerRid(),
                    this.state.getFeedRange(),
                    effectiveRange));
        })
        .flatMap(state -> state.getContinuation().handleSplit(client, (GoneException)e));
}

return this.state.getContinuation().handleSplit(client, (GoneException)e);
} | class FeedRangeContinuationSplitRetryPolicy extends RetryPolicyWithDiagnostics {
// Change feed state whose continuation performs the actual split handling.
private final ChangeFeedState state;
// Client used to resolve partition key ranges and the collection cache.
private final RxDocumentClientImpl client;

public FeedRangeContinuationSplitRetryPolicy(
    RxDocumentClientImpl client,
    ChangeFeedState state) {

    this.client = client;
    this.state = state;
}

// NOTE(review): this @Override belongs to the shouldRetry method omitted from this excerpt.
@Override
} | class FeedRangeContinuationSplitRetryPolicy extends RetryPolicyWithDiagnostics {
// Change feed state whose continuation performs the actual split handling.
private final ChangeFeedState state;
// Client used to resolve partition key ranges and the collection cache.
private final RxDocumentClientImpl client;

public FeedRangeContinuationSplitRetryPolicy(
    RxDocumentClientImpl client,
    ChangeFeedState state) {

    this.client = client;
    this.state = state;
}

// NOTE(review): this @Override belongs to the shouldRetry method omitted from this excerpt.
@Override
} |
1. It's better to add one space after `//`. 2. Here, `/* */` is better than `//`. 3. We can break line if link is too long. | public void handleError(ClientHttpResponse response) throws IOException {
// Anything other than HTTP 400 falls through to Spring's default error handling.
if (!HttpStatus.BAD_REQUEST.equals(response.getStatusCode())) {
    this.defaultErrorHandler.handleError(response);
}

// Prefer the OAuth2 error advertised in the WWW-Authenticate header; fall back
// to decoding the response body.
OAuth2Error oauth2Error = this.readErrorFromWwwAuthenticate(response.getHeaders());
if (oauth2Error == null) {
    oauth2Error = this.oauth2ErrorConverter.read(OAuth2Error.class, response);
}

throw new OAuth2AuthorizationException(oauth2Error);
} | public void handleError(ClientHttpResponse response) throws IOException {
// Anything other than HTTP 400 falls through to Spring's default error handling.
if (!HttpStatus.BAD_REQUEST.equals(response.getStatusCode())) {
    this.defaultErrorHandler.handleError(response);
}

// Prefer the OAuth2 error advertised in the WWW-Authenticate header; fall back
// to decoding the response body.
OAuth2Error oauth2Error = this.readErrorFromWwwAuthenticate(response.getHeaders());
if (oauth2Error == null) {
    oauth2Error = this.oauth2ErrorConverter.read(OAuth2Error.class, response);
}

/*
 * Handle conditional access policy, step 1: surface the AAD error (including any
 * claims challenge) to the caller as an OAuth2AuthorizationException.
 * NOTE(review): the reference link was lost in this excerpt - restore the original URL.
 */
throw new OAuth2AuthorizationException(oauth2Error);
} | class AADConditionalAccessResponseErrorHandler implements ResponseErrorHandler {
// Converter decoding OAuth2 error payloads from the response body.
private final OAuth2ErrorHttpMessageConverter oauth2ErrorConverter = new OAuth2ErrorHttpMessageConverter();
// Spring's default handler, used for error detection and non-400 statuses.
private final ResponseErrorHandler defaultErrorHandler = new DefaultResponseErrorHandler();

protected AADConditionalAccessResponseErrorHandler() {
    // Decode AAD-specific fields (error_codes, trace_id, claims, ...) instead of
    // the plain OAuth2 error shape.
    this.oauth2ErrorConverter.setErrorConverter(new AADOAuth2ErrorConverter());
}

@Override
public boolean hasError(ClientHttpResponse response) throws IOException {
    // Delegate error detection to Spring's default handler.
    return this.defaultErrorHandler.hasError(response);
}

// NOTE(review): this @Override belongs to the handleError method omitted from this excerpt.
@Override

// Extracts an OAuth2 error from the WWW-Authenticate bearer challenge when
// present and parseable; returns null otherwise.
private OAuth2Error readErrorFromWwwAuthenticate(HttpHeaders headers) {
    String wwwAuthenticateHeader = headers.getFirst(HttpHeaders.WWW_AUTHENTICATE);
    if (!StringUtils.hasText(wwwAuthenticateHeader)) {
        return null;
    }

    BearerTokenError bearerTokenError;
    try {
        bearerTokenError = BearerTokenError.parse(wwwAuthenticateHeader);
    } catch (Exception ex) {
        // Malformed challenge - let the caller fall back to body-based decoding.
        return null;
    }

    String errorCode = bearerTokenError.getCode() != null
        ? bearerTokenError.getCode() : OAuth2ErrorCodes.SERVER_ERROR;
    String errorDescription = bearerTokenError.getDescription();
    String errorUri = bearerTokenError.getURI() != null
        ? bearerTokenError.getURI().toString() : null;

    return new OAuth2Error(errorCode, errorDescription, errorUri);
}

// Maps raw AAD error parameters onto the richer AzureOAuth2Error model.
private static class AADOAuth2ErrorConverter implements Converter<Map<String, String>, OAuth2Error> {
    @Override
    public OAuth2Error convert(Map<String, String> parameters) {
        String errorCode = parameters.get("error");
        String description = parameters.get("error_description");
        String errorCodes = parameters.get("error_codes");
        String timestamp = parameters.get("timestamp");
        String traceId = parameters.get("trace_id");
        String correlationId = parameters.get("correlation_id");
        String uri = parameters.get("error_uri");
        String subError = parameters.get("suberror");
        String claims = parameters.get("claims");

        return new AzureOAuth2Error(errorCode, description, errorCodes, timestamp, traceId, correlationId,
            uri, subError, claims);
    }
}
} | class AADConditionalAccessResponseErrorHandler implements ResponseErrorHandler {
// Converter decoding OAuth2 error payloads from the response body.
private final OAuth2ErrorHttpMessageConverter oauth2ErrorConverter = new OAuth2ErrorHttpMessageConverter();
// Spring's default handler, used for error detection and non-400 statuses.
private final ResponseErrorHandler defaultErrorHandler = new DefaultResponseErrorHandler();

protected AADConditionalAccessResponseErrorHandler() {
    // Decode AAD-specific fields instead of the plain OAuth2 error shape.
    this.oauth2ErrorConverter.setErrorConverter(new AADOAuth2ErrorConverter());
}

@Override
public boolean hasError(ClientHttpResponse response) throws IOException {
    return this.defaultErrorHandler.hasError(response);
}

// NOTE(review): this @Override belongs to the handleError method omitted from this excerpt.
@Override

// Extracts an OAuth2 error from the WWW-Authenticate bearer challenge when
// present and parseable; returns null otherwise.
private OAuth2Error readErrorFromWwwAuthenticate(HttpHeaders headers) {
    String wwwAuthenticateHeader = headers.getFirst(HttpHeaders.WWW_AUTHENTICATE);
    if (!StringUtils.hasText(wwwAuthenticateHeader)) {
        return null;
    }

    BearerTokenError bearerTokenError;
    try {
        bearerTokenError = BearerTokenError.parse(wwwAuthenticateHeader);
    } catch (Exception ex) {
        // Malformed challenge - let the caller fall back to body-based decoding.
        return null;
    }

    String errorCode = bearerTokenError.getCode() != null
        ? bearerTokenError.getCode() : OAuth2ErrorCodes.SERVER_ERROR;
    String errorDescription = bearerTokenError.getDescription();
    String errorUri = bearerTokenError.getURI() != null
        ? bearerTokenError.getURI().toString() : null;

    return new OAuth2Error(errorCode, errorDescription, errorUri);
}

// Maps raw AAD error parameters onto the richer AzureOAuth2Error model.
private static class AADOAuth2ErrorConverter implements Converter<Map<String, String>, OAuth2Error> {
    @Override
    public OAuth2Error convert(Map<String, String> parameters) {
        String errorCode = parameters.get("error");
        String description = parameters.get("error_description");
        String errorCodes = parameters.get("error_codes");
        String timestamp = parameters.get("timestamp");
        String traceId = parameters.get("trace_id");
        String correlationId = parameters.get("correlation_id");
        String uri = parameters.get("error_uri");
        String subError = parameters.get("suberror");
        String claims = parameters.get("claims");

        return new AzureOAuth2Error(errorCode, description, errorCodes, timestamp, traceId, correlationId,
            uri, subError, claims);
    }
}
} | |
I would use a different key such as `azure-log-string-to-sign` to ensure there is no overlap and to indicate it's Azure SDK specific | public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);

ensureState();

final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
final String stringToSign = stringToSign(canonicalName);
// Opt-in, PII-sensitive logging of the string-to-sign is centralized in the shared
// helper so the context key and warning text stay consistent across SAS generators.
StorageImplUtils.logStringToSign(logger, stringToSign, context);

final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);

return encode(null /* userDelegationKey */, signature);
} | if (context != null && Boolean.TRUE.equals(context.getData("Log-String-To-Sign").orElse(false))) { | public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);

ensureState();

final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
final String stringToSign = stringToSign(canonicalName);
// Opt-in, PII-sensitive logging of the string-to-sign is delegated to the shared helper.
StorageImplUtils.logStringToSign(logger, stringToSign, context);

final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);

return encode(null /* userDelegationKey */, signature);
} | class BlobSasImplUtil {
/**
* The SAS blob constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS blob snapshot constant.
*/
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
* The SAS blob version constant.
*/
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
* The SAS blob container constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private final ClientLogger logger = new ClientLogger(BlobSasImplUtil.class);

// SAS parameters captured from BlobServiceSasSignatureValues plus the target
// resource coordinates; 'version' and 'resource' are finalized in ensureState().
private String version;
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private String authorizedAadObjectId;
private String correlationId;
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
    this(sasValues, containerName, null, null, null);
}

/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId) {
    Objects.requireNonNull(sasValues);

    // A SAS can target a snapshot or a version, never both at once.
    if (snapshotId != null && versionId != null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
    }

    this.version = null; /* Setting this to null forces the latest service version - see ensureState. */
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.containerName = containerName;
    this.blobName = blobName;
    this.snapshotId = snapshotId;
    this.versionId = versionId;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @return A String representing the Sas
*/
/**
 * Generates a Sas signed with a {@link UserDelegationKey}
 *
 * @param delegationKey {@link UserDelegationKey}
 * @param accountName The account name
 * @param context Additional context passed through when generating the Sas
 * (controls opt-in string-to-sign logging).
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);

    ensureState();

    final String canonicalName = getCanonicalName(accountName);
    final String stringToSign = stringToSign(delegationKey, canonicalName);
    // Opt-in, PII-sensitive logging of the string-to-sign is centralized in the shared
    // helper so the context key and warning text stay consistent across SAS generators.
    StorageImplUtils.logStringToSign(logger, stringToSign, context);

    String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);

    return encode(delegationKey, signature);
}
/**
 * Encodes a Sas from the values in this type.
 * @param userDelegationKey {@link UserDelegationKey}
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
    */
    StringBuilder sb = new StringBuilder();

    // Common SAS query parameters.
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, this.version);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);

    if (userDelegationKey != null) {
        // User delegation SAS carries the signed key metadata in the query string.
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());

        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }

    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);

    return sb.toString();
}
/**
 * Ensures that the builder's properties are in a consistent state.
 * 1. If there is no version, use latest.
 * 2. If there is no identifier set, ensure expiryTime and permissions are set.
 * 3. Resource name is chosen by:
 *    a. If "BlobName" is _not_ set, it is a container resource.
 *    b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
 *    c. Otherwise, if "VersionId" is set, it is a blob version resource.
 *    d. Otherwise, it is a blob resource.
 * 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
 *
 * Taken from the equivalent SAS-generation logic in the other Azure Storage SDKs.
 * NOTE(review): the original reference URLs were lost in this excerpt - restore them.
 */
private void ensureState() {
    if (version == null) {
        version = BlobSasServiceVersion.getLatest().getVersion();
    }

    if (identifier == null) {
        // Without a stored access policy, expiry and permissions are mandatory.
        if (expiryTime == null || permissions == null) {
            throw logger.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
                + "and permissions must be set"));
        }
    }

    if (CoreUtils.isNullOrEmpty(blobName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (snapshotId != null) {
        resource = SAS_BLOB_SNAPSHOT_CONSTANT;
    } else if (versionId != null) {
        resource = SAS_BLOB_VERSION_CONSTANT;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }

    if (permissions != null) {
        // Re-parsing normalizes the permission string order for the chosen resource type.
        switch (resource) {
            case SAS_BLOB_CONSTANT:
            case SAS_BLOB_SNAPSHOT_CONSTANT:
            case SAS_BLOB_VERSION_CONSTANT:
                permissions = BlobSasPermission.parse(permissions).toString();
                break;
            case SAS_CONTAINER_CONSTANT:
                permissions = BlobContainerSasPermission.parse(permissions).toString();
                break;
            default:
                logger.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
                break;
        }
    }
}
/**
 * Computes the canonical name for a container or blob resource for SAS signing.
 * Backslashes in the blob name are normalized to forward slashes.
 */
private String getCanonicalName(String account) {
    return CoreUtils.isNullOrEmpty(blobName)
        ? String.format("/blob/%s/%s", account, containerName)
        : String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
// Builds the newline-delimited string-to-sign for a service SAS signed with the
// account key. Field order must match the Storage service's expectations exactly;
// absent optional values are emitted as empty strings.
private String stringToSign(String canonicalName) {
    // A snapshot id and a version id are mutually exclusive (enforced in the constructor).
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    return String.join("\n",
        this.permissions == null ? "" : permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        this.identifier == null ? "" : this.identifier,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        version,
        resource,
        versionSegment == null ? "" : versionSegment,
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
}
// Builds the newline-delimited string-to-sign for a user delegation SAS. Includes the
// signed key metadata between the canonical name and the ip range; field order must
// match the Storage service's expectations exactly.
private String stringToSign(final UserDelegationKey key, String canonicalName) {
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    return String.join("\n",
        this.permissions == null ? "" : this.permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
        key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
        key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
        key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
        key.getSignedService() == null ? "" : key.getSignedService(),
        key.getSignedVersion() == null ? "" : key.getSignedVersion(),
        this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
        "", /* suoid - empty since this applies to HNS only accounts. */
        this.correlationId == null ? "" : this.correlationId,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        version,
        resource,
        versionSegment == null ? "" : versionSegment,
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
}
} | class BlobSasImplUtil {
/**
* The SAS blob constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS blob snapshot constant.
*/
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
* The SAS blob version constant.
*/
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
* The SAS blob container constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private final ClientLogger logger = new ClientLogger(BlobSasImplUtil.class);
private String version;
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private String authorizedAadObjectId;
private String correlationId;
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
this(sasValues, containerName, null, null, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId) {
Objects.requireNonNull(sasValues);
if (snapshotId != null && versionId != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
}
this.version = null; /* Setting this to null forces the latest service version - see ensureState. */
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.containerName = containerName;
this.blobName = blobName;
this.snapshotId = snapshotId;
this.versionId = versionId;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
StorageImplUtils.logStringToSign(logger, stringToSign, context);
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, this.version);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no version, use latest.
* 2. If there is no identifier set, ensure expiryTime and permissions are set.
* 3. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
*
* Taken from:
* https:
* https:
*/
private void ensureState() {
if (version == null) {
version = BlobSasServiceVersion.getLatest().getVersion();
}
if (identifier == null) {
if (expiryTime == null || permissions == null) {
throw logger.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
+ "and permissions must be set"));
}
}
if (CoreUtils.isNullOrEmpty(blobName)) {
resource = SAS_CONTAINER_CONSTANT;
} else if (snapshotId != null) {
resource = SAS_BLOB_SNAPSHOT_CONSTANT;
} else if (versionId != null) {
resource = SAS_BLOB_VERSION_CONSTANT;
} else {
resource = SAS_BLOB_CONSTANT;
}
if (permissions != null) {
switch (resource) {
case SAS_BLOB_CONSTANT:
case SAS_BLOB_SNAPSHOT_CONSTANT:
case SAS_BLOB_VERSION_CONSTANT:
permissions = BlobSasPermission.parse(permissions).toString();
break;
case SAS_CONTAINER_CONSTANT:
permissions = BlobContainerSasPermission.parse(permissions).toString();
break;
default:
logger.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
break;
}
}
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
private String getCanonicalName(String account) {
return CoreUtils.isNullOrEmpty(blobName)
? String.format("/blob/%s/%s", account, containerName)
: String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
private String stringToSign(String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
version,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
private String stringToSign(final UserDelegationKey key, String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
version,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} |
Since this is in multiple spots let's make it a method so if it needs to be updated only one location needs modification | public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
if (context != null && Boolean.TRUE.equals(context.getData("Log-String-To-Sign").orElse(false))) {
logger.info("The string to sign computed by the SDK is: {}{}", stringToSign,
System.getProperty("line.separator"));
logger.warning("Please remember to disable 'Log-String-To-Sign' before going to production as this "
+ "string can potentially contain PII.");
}
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
} | } | public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
StorageImplUtils.logStringToSign(logger, stringToSign, context);
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
} | class BlobSasImplUtil {
/**
* The SAS blob constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS blob snapshot constant.
*/
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
* The SAS blob version constant.
*/
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
* The SAS blob container constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private final ClientLogger logger = new ClientLogger(BlobSasImplUtil.class);
private String version;
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private String authorizedAadObjectId;
private String correlationId;
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
this(sasValues, containerName, null, null, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId) {
Objects.requireNonNull(sasValues);
if (snapshotId != null && versionId != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
}
this.version = null; /* Setting this to null forces the latest service version - see ensureState. */
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.containerName = containerName;
this.blobName = blobName;
this.snapshotId = snapshotId;
this.versionId = versionId;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @return A String representing the Sas
*/
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
ensureState();
final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
final String stringToSign = stringToSign(canonicalName);
if (context != null && Boolean.TRUE.equals(context.getData("Log-String-To-Sign").orElse(false))) {
logger.info("The string to sign computed by the SDK is: {}{}", stringToSign,
System.getProperty("line.separator"));
logger.warning("Please remember to disable 'Log-String-To-Sign' before going to production as this "
+ "string can potentially contain PII.");
}
final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
return encode(null /* userDelegationKey */, signature);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @return A String representing the Sas
*/
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, this.version);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no version, use latest.
* 2. If there is no identifier set, ensure expiryTime and permissions are set.
* 3. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
*
* Taken from:
* https:
* https:
*/
private void ensureState() {
if (version == null) {
version = BlobSasServiceVersion.getLatest().getVersion();
}
if (identifier == null) {
if (expiryTime == null || permissions == null) {
throw logger.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
+ "and permissions must be set"));
}
}
if (CoreUtils.isNullOrEmpty(blobName)) {
resource = SAS_CONTAINER_CONSTANT;
} else if (snapshotId != null) {
resource = SAS_BLOB_SNAPSHOT_CONSTANT;
} else if (versionId != null) {
resource = SAS_BLOB_VERSION_CONSTANT;
} else {
resource = SAS_BLOB_CONSTANT;
}
if (permissions != null) {
switch (resource) {
case SAS_BLOB_CONSTANT:
case SAS_BLOB_SNAPSHOT_CONSTANT:
case SAS_BLOB_VERSION_CONSTANT:
permissions = BlobSasPermission.parse(permissions).toString();
break;
case SAS_CONTAINER_CONSTANT:
permissions = BlobContainerSasPermission.parse(permissions).toString();
break;
default:
logger.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
break;
}
}
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
private String getCanonicalName(String account) {
return CoreUtils.isNullOrEmpty(blobName)
? String.format("/blob/%s/%s", account, containerName)
: String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
private String stringToSign(String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
version,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
private String stringToSign(final UserDelegationKey key, String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
version,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class BlobSasImplUtil {
/**
* The SAS blob constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS blob snapshot constant.
*/
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
* The SAS blob version constant.
*/
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
* The SAS blob container constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private final ClientLogger logger = new ClientLogger(BlobSasImplUtil.class);
private String version;
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private String authorizedAadObjectId;
private String correlationId;
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
this(sasValues, containerName, null, null, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId) {
Objects.requireNonNull(sasValues);
if (snapshotId != null && versionId != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
}
this.version = null; /* Setting this to null forces the latest service version - see ensureState. */
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.containerName = containerName;
this.blobName = blobName;
this.snapshotId = snapshotId;
this.versionId = versionId;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
ensureState();
final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
final String stringToSign = stringToSign(canonicalName);
StorageImplUtils.logStringToSign(logger, stringToSign, context);
final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
return encode(null /* userDelegationKey */, signature);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, this.version);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no version, use latest.
* 2. If there is no identifier set, ensure expiryTime and permissions are set.
* 3. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
*
* Taken from:
* https:
* https:
*/
private void ensureState() {
if (version == null) {
version = BlobSasServiceVersion.getLatest().getVersion();
}
if (identifier == null) {
if (expiryTime == null || permissions == null) {
throw logger.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
+ "and permissions must be set"));
}
}
if (CoreUtils.isNullOrEmpty(blobName)) {
resource = SAS_CONTAINER_CONSTANT;
} else if (snapshotId != null) {
resource = SAS_BLOB_SNAPSHOT_CONSTANT;
} else if (versionId != null) {
resource = SAS_BLOB_VERSION_CONSTANT;
} else {
resource = SAS_BLOB_CONSTANT;
}
if (permissions != null) {
switch (resource) {
case SAS_BLOB_CONSTANT:
case SAS_BLOB_SNAPSHOT_CONSTANT:
case SAS_BLOB_VERSION_CONSTANT:
permissions = BlobSasPermission.parse(permissions).toString();
break;
case SAS_CONTAINER_CONSTANT:
permissions = BlobContainerSasPermission.parse(permissions).toString();
break;
default:
logger.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
break;
}
}
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
private String getCanonicalName(String account) {
return CoreUtils.isNullOrEmpty(blobName)
? String.format("/blob/%s/%s", account, containerName)
: String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
/**
 * Builds the string-to-sign for an account-key SAS. The field order is mandated
 * by the Storage service and must not change; every unset optional field
 * contributes an empty line.
 *
 * @param canonicalName The canonical resource name being signed.
 * @return The newline-delimited string to sign.
 */
private String stringToSign(String canonicalName) {
    // A snapshot id takes precedence over a version id for the signed segment.
    String versionSegment = snapshotId == null ? versionId : snapshotId;
    String[] signedFields = {
        permissions == null ? "" : permissions,
        startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(startTime),
        expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(expiryTime),
        canonicalName,
        identifier == null ? "" : identifier,
        sasIpRange == null ? "" : sasIpRange.toString(),
        protocol == null ? "" : protocol.toString(),
        version,
        resource,
        versionSegment == null ? "" : versionSegment,
        cacheControl == null ? "" : cacheControl,
        contentDisposition == null ? "" : contentDisposition,
        contentEncoding == null ? "" : contentEncoding,
        contentLanguage == null ? "" : contentLanguage,
        contentType == null ? "" : contentType
    };
    return String.join("\n", signedFields);
}
/**
 * Builds the string-to-sign for a user delegation SAS, i.e. a SAS signed with a
 * {@link UserDelegationKey} obtained from AAD rather than with the account key.
 * The field order is mandated by the Storage service and must not be changed;
 * every unset optional field contributes an empty line.
 *
 * @param key The user delegation key whose signed properties are embedded in the payload.
 * @param canonicalName The canonical resource name being signed.
 * @return The newline-delimited string to sign.
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
// A snapshot id takes precedence over a version id for the signed segment.
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
// The delegation key's own signed properties are part of the payload.
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
version,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} |
Let's make this base message a constant, since all of its formatting parameters are constants; that way we avoid repeated `String.format` calls instead of waiting for the JIT to optimize them. | public static String convertStorageExceptionMessage(String message, HttpResponse response) {
if (response != null) {
if (response.getStatusCode() == 403) {
return String.format("If you are using a StorageSharedKeyCredential, and the server returned an "
+ "error message that says 'Signature did not match', you can compare the string to sign with"
+ " the one generated by the SDK. To log the string to sign, pass in the context key value pair "
+ "'%s': true to the appropriate method call.%n"
+ "If you are using a SAS token, and the server returned an error message that says "
+ "'Signature did not match', you can compare the string to sign with"
+ " the one generated by the SDK. To log the string to sign, pass in the context key value "
+ "pair '%s': true to the appropriate generateSas method call.%n"
+ "Please remember to disable '%s' before going to production as this string can potentially "
+ "contain PII.%n",
Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN,
Constants.STORAGE_LOG_STRING_TO_SIGN)
+ message;
}
if (response.getRequest() != null && response.getRequest().getHttpMethod() != null
&& response.getRequest().getHttpMethod().equals(HttpMethod.HEAD)
&& response.getHeaders().getValue(ERROR_CODE) != null) {
return message.replaceFirst("(empty body)", response.getHeaders().getValue(ERROR_CODE));
}
}
return message;
} | return String.format("If you are using a StorageSharedKeyCredential, and the server returned an " | public static String convertStorageExceptionMessage(String message, HttpResponse response) {
if (response != null) {
if (response.getStatusCode() == 403) {
return STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE + message;
}
if (response.getRequest() != null && response.getRequest().getHttpMethod() != null
&& response.getRequest().getHttpMethod().equals(HttpMethod.HEAD)
&& response.getHeaders().getValue(ERROR_CODE) != null) {
return message.replaceFirst("(empty body)", response.getHeaders().getValue(ERROR_CODE));
}
}
return message;
} | class StorageImplUtils {
// Logger used to record validation failures raised by this utility class.
private static final ClientLogger LOGGER = new ClientLogger(StorageImplUtils.class);
// Format template for assertNotNull: %s is the offending argument's name.
private static final String ARGUMENT_NULL_OR_EMPTY =
"The argument must not be null or an empty string. Argument name: %s.";
// Format template for assertInBounds: parameter name, minimum, maximum.
private static final String PARAMETER_NOT_IN_RANGE = "The value of the parameter '%s' should be between %s and %s.";
// Format template for stripLastPathSegment: the URL that lacks path segments.
private static final String NO_PATH_SEGMENTS = "URL %s does not contain path segments.";
/**
* Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is
* stored as a string (ex. key=val1,val2,val3 instead of key=[val1, val2, val3]).
*
* @param queryString Query string to parse
* @return a mapping of query string pieces as key-value pairs.
*/
public static Map<String, String> parseQueryString(final String queryString) {
    // Keep each value as a single decoded string (comma-joined lists stay joined).
    return parseQueryStringHelper(queryString, value -> Utility.urlDecode(value));
}
/**
* Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is
* stored as a parsed array (ex. key=[val1, val2, val3] instead of key=val1,val2,val3).
*
* @param queryString Query string to parse
* @return a mapping of query string pieces as key-value pairs.
*/
public static Map<String, String[]> parseQueryStringSplitValues(final String queryString) {
    return parseQueryStringHelper(queryString, value -> {
        // Split the comma-joined value list and URL-decode each element in place;
        // split() returns a fresh array, so mutating it is safe.
        String[] decoded = value.split(",");
        for (int i = 0; i < decoded.length; i++) {
            decoded[i] = urlDecode(decoded[i]);
        }
        return decoded;
    });
}
/**
 * Splits a query string on '&amp;' into a sorted key -&gt; parsed-value map. Keys are
 * URL-decoded and lower-cased; values are handed to {@code valueParser}. The first
 * occurrence of a key wins; later duplicates are ignored.
 *
 * @param queryString Query string to parse; null/empty yields an empty map.
 * @param valueParser Converts the raw (still-encoded) value text into the map's value type.
 * @param <T> Parsed value type.
 * @return A {@link TreeMap} of query parameters ordered by key.
 */
private static <T> Map<String, T> parseQueryStringHelper(final String queryString,
    Function<String, T> valueParser) {
    TreeMap<String, T> pieces = new TreeMap<>();
    if (CoreUtils.isNullOrEmpty(queryString)) {
        return pieces;
    }
    for (String kvp : queryString.split("&")) {
        int equalIndex = kvp.indexOf('=');
        String rawKey;
        String rawValue;
        if (equalIndex == -1) {
            // FIX: a parameter with no '=' (e.g. "a&b=1") previously made
            // substring(0, -1) throw StringIndexOutOfBoundsException. Treat the
            // whole piece as a key with an empty value instead.
            rawKey = kvp;
            rawValue = "";
        } else {
            rawKey = kvp.substring(0, equalIndex);
            rawValue = kvp.substring(equalIndex + 1);
        }
        String key = urlDecode(rawKey.toLowerCase(Locale.ROOT));
        // First occurrence of a key wins.
        pieces.putIfAbsent(key, valueParser.apply(rawValue));
    }
    return pieces;
}
/**
* Blocks an asynchronous response with an optional timeout.
*
* @param response Asynchronous response to block
* @param timeout Optional timeout
* @param <T> Return type of the asynchronous response
* @return the value of the asynchronous response
* @throws RuntimeException If the asynchronous response doesn't complete before the timeout expires.
*/
public static <T> T blockWithOptionalTimeout(Mono<T> response, Duration timeout) {
    // Block indefinitely when no timeout was supplied, otherwise bound the wait.
    return (timeout == null) ? response.block() : response.block(timeout);
}
/**
* Applies a timeout to a publisher if the given timeout is not null.
*
* @param publisher Mono to apply optional timeout to.
* @param timeout Optional timeout.
* @param <T> Return type of the Mono.
* @return Mono with an applied timeout, if any.
*/
public static <T> Mono<T> applyOptionalTimeout(Mono<T> publisher, Duration timeout) {
    // No timeout configured: hand the publisher back untouched.
    if (timeout == null) {
        return publisher;
    }
    return publisher.timeout(timeout);
}
/**
* Applies a timeout to a publisher if the given timeout is not null.
*
* @param publisher Flux to apply optional timeout to.
* @param timeout Optional timeout.
* @param <T> Return type of the Flux.
* @return Flux with an applied timeout, if any.
*/
public static <T> Flux<T> applyOptionalTimeout(Flux<T> publisher, Duration timeout) {
    // No timeout configured: hand the publisher back untouched.
    if (timeout == null) {
        return publisher;
    }
    return publisher.timeout(timeout);
}
/**
* Asserts that a value is not {@code null}.
*
* @param param Name of the parameter
* @param value Value of the parameter
* @throws NullPointerException If {@code value} is {@code null}
*/
public static void assertNotNull(final String param, final Object value) {
    // Fast path: a non-null value needs no further checks.
    if (value != null) {
        return;
    }
    throw new NullPointerException(String.format(Locale.ROOT, ARGUMENT_NULL_OR_EMPTY, param));
}
/**
* Asserts that the specified number is in the valid range. The range is inclusive.
*
* @param param Name of the parameter
* @param value Value of the parameter
* @param min The minimum allowed value
* @param max The maximum allowed value
* @throws IllegalArgumentException If {@code value} is less than {@code min} or {@code value} is greater than
* {@code max}.
*/
public static void assertInBounds(final String param, final long value, final long min, final long max) {
    // The range is inclusive on both ends.
    boolean outOfRange = value < min || value > max;
    if (outOfRange) {
        String detail = String.format(Locale.ROOT, PARAMETER_NOT_IN_RANGE, param, min, max);
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(detail));
    }
}
/**
* Computes a signature for the specified string using the HMAC-SHA256 algorithm.
*
* @param base64Key Base64 encoded key used to sign the string
* @param stringToSign UTF-8 encoded string to sign
* @return the HMAC-SHA256 encoded signature
* @throws RuntimeException If the HMAC-SHA256 algorithm isn't support, if the key isn't a valid Base64 encoded
* string, or the UTF-8 charset isn't supported.
*/
public static String computeHMac256(final String base64Key, final String stringToSign) {
    try {
        // Key material arrives Base64-encoded; decode it before keying the MAC.
        Mac hmac = Mac.getInstance("HmacSHA256");
        hmac.init(new SecretKeySpec(Base64.getDecoder().decode(base64Key), "HmacSHA256"));
        byte[] signature = hmac.doFinal(stringToSign.getBytes(StandardCharsets.UTF_8));
        return Base64.getEncoder().encodeToString(signature);
    } catch (NoSuchAlgorithmException | InvalidKeyException ex) {
        // HmacSHA256 is mandated by the JCA spec, so this is effectively unreachable.
        throw new RuntimeException(ex);
    }
}
/**
* Appends a string to the end of the passed URL's path.
*
* @param baseURL URL having a path appended
* @param name Name of the path
* @return a URL with the path appended.
* @throws IllegalArgumentException If {@code name} causes the URL to become malformed.
*/
public static URL appendToUrlPath(String baseURL, String name) {
    UrlBuilder builder = UrlBuilder.parse(baseURL);
    // Normalize so the existing path always ends with exactly one '/' separator.
    String path = builder.getPath();
    if (path == null) {
        path = "/";
    } else if (!path.endsWith("/")) {
        path = path + "/";
    }
    builder.setPath(path + name);
    try {
        return builder.toUrl();
    } catch (MalformedURLException ex) {
        throw new IllegalArgumentException(ex);
    }
}
/**
* Strips the last path segment from the passed URL.
*
* @param baseUrl URL having its last path segment stripped
* @return a URL with the path segment stripped.
* @throws IllegalArgumentException If stripping the last path segment causes the URL to become malformed or it
* doesn't contain any path segments.
*/
public static URL stripLastPathSegment(URL baseUrl) {
    UrlBuilder builder = UrlBuilder.parse(baseUrl);
    String path = builder.getPath();
    // A path without any '/' has no segment to strip.
    if (path == null || path.indexOf('/') == -1) {
        throw new IllegalArgumentException(String.format(Locale.ROOT, NO_PATH_SEGMENTS, baseUrl));
    }
    builder.setPath(path.substring(0, path.lastIndexOf('/')));
    try {
        return builder.toUrl();
    } catch (MalformedURLException ex) {
        throw new IllegalArgumentException(ex);
    }
}
/**
* Strips the account name from host part of the URL object.
*
* @param url URL having its hostanme
* @return account name.
*/
public static String getAccountName(URL url) {
    String host = UrlBuilder.parse(url).getHost();
    // No host means no account name to extract.
    if (CoreUtils.isNullOrEmpty(host)) {
        return null;
    }
    // The account name is everything before the first dot (or the whole host).
    int firstDot = host.indexOf('.');
    return (firstDot == -1) ? host : host.substring(0, firstDot);
}
/** Returns an empty string if value is {@code null}, otherwise returns value
* @param value The value to check and return.
* @return The value or empty string.
*/
public static String emptyIfNull(String value) {
    // Normalize null to the empty string; pass everything else through unchanged.
    if (value == null) {
        return "";
    }
    return value;
}
/**
* Reads data from an input stream and writes it to an output stream.
* @param source {@link InputStream source}
* @param writeLength The length of data to write.
* @param destination {@link OutputStream destination}
* @throws IOException If an I/O error occurs.
*/
/**
 * Reads up to {@code writeLength} bytes from {@code source} and writes them to
 * {@code destination} using a fixed-size intermediate buffer. Copying stops at
 * end-of-stream or once {@code writeLength} bytes have been written, whichever
 * comes first.
 *
 * @param source {@link InputStream source}
 * @param writeLength The total number of bytes to write.
 * @param destination {@link OutputStream destination}
 * @throws IOException If an I/O error occurs.
 */
public static void copyToOutputStream(InputStream source, long writeLength, OutputStream destination)
    throws IOException {
    StorageImplUtils.assertNotNull("source", source);
    StorageImplUtils.assertNotNull("destination", destination);
    final byte[] buffer = new byte[Constants.BUFFER_COPY_LENGTH];
    // FIX: track how many bytes remain. The original recomputed each chunk from the
    // unchanged writeLength, so the cap applied per chunk rather than in total and
    // the copy only stopped at end-of-stream, writing more than writeLength bytes
    // for streams longer than writeLength.
    long remaining = writeLength;
    int nextCopy = (int) Math.min(buffer.length, remaining);
    while (nextCopy > 0) {
        int count = source.read(buffer, 0, nextCopy);
        if (count == -1) {
            break; // End of stream before writeLength bytes were available.
        }
        destination.write(buffer, 0, count);
        remaining -= count;
        nextCopy = (int) Math.min(buffer.length, remaining);
    }
}
/**
* Logs the string to sign if a valid context is provided.
*
* @param logger {@link ClientLogger}
* @param stringToSign The string to sign to log.
* @param context Additional context to determine if the string to sign should be logged.
*/
public static void logStringToSign(ClientLogger logger, String stringToSign, Context context) {
    // Only log when the caller explicitly opted in via the context flag.
    if (context == null) {
        return;
    }
    Object optIn = context.getData(Constants.STORAGE_LOG_STRING_TO_SIGN).orElse(false);
    if (Boolean.TRUE.equals(optIn)) {
        logger.info("The string to sign computed by the SDK is: {}{}", stringToSign,
            System.lineSeparator());
        logger.warning("Please remember to disable '{}' before going to production as this "
            + "string can potentially contain PII.", Constants.STORAGE_LOG_STRING_TO_SIGN);
    }
}
/**
* Converts the storage exception message.
*
* @param message The storage exception message
* @param response The storage service response.
* @return The converted storage exception message.
*/
} | class StorageImplUtils {
// Logger used to record validation failures raised by this utility class.
private static final ClientLogger LOGGER = new ClientLogger(StorageImplUtils.class);
// Format template for assertNotNull: %s is the offending argument's name.
private static final String ARGUMENT_NULL_OR_EMPTY =
"The argument must not be null or an empty string. Argument name: %s.";
// Format template for assertInBounds: parameter name, minimum, maximum.
private static final String PARAMETER_NOT_IN_RANGE = "The value of the parameter '%s' should be between %s and %s.";
// Format template for stripLastPathSegment: the URL that lacks path segments.
private static final String NO_PATH_SEGMENTS = "URL %s does not contain path segments.";
// Parameterized log templates used by logStringToSign (SLF4J-style '{}' placeholders).
private static final String STRING_TO_SIGN_LOG_INFO_MESSAGE = "The string to sign computed by the SDK is: {}{}";
private static final String STRING_TO_SIGN_LOG_WARNING_MESSAGE = "Please remember to disable '{}' before going "
+ "to production as this string can potentially contain PII.";
// Guidance appended to 403 error messages. Pre-formatted once here because every
// format argument is the STORAGE_LOG_STRING_TO_SIGN constant, so the message never
// varies at runtime and repeated String.format calls are avoided.
private static final String STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE = String.format(
"If you are using a StorageSharedKeyCredential, and the server returned an "
+ "error message that says 'Signature did not match', you can compare the string to sign with"
+ " the one generated by the SDK. To log the string to sign, pass in the context key value pair "
+ "'%s': true to the appropriate method call.%n"
+ "If you are using a SAS token, and the server returned an error message that says "
+ "'Signature did not match', you can compare the string to sign with"
+ " the one generated by the SDK. To log the string to sign, pass in the context key value "
+ "pair '%s': true to the appropriate generateSas method call.%n"
+ "Please remember to disable '%s' before going to production as this string can potentially "
+ "contain PII.%n",
Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN,
Constants.STORAGE_LOG_STRING_TO_SIGN);
/**
* Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is
* stored as a string (ex. key=val1,val2,val3 instead of key=[val1, val2, val3]).
*
* @param queryString Query string to parse
* @return a mapping of query string pieces as key-value pairs.
*/
public static Map<String, String> parseQueryString(final String queryString) {
return parseQueryStringHelper(queryString, Utility::urlDecode);
}
/**
* Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is
* stored as a parsed array (ex. key=[val1, val2, val3] instead of key=val1,val2,val3).
*
* @param queryString Query string to parse
* @return a mapping of query string pieces as key-value pairs.
*/
public static Map<String, String[]> parseQueryStringSplitValues(final String queryString) {
return parseQueryStringHelper(queryString, value -> {
String[] v = value.split(",");
String[] ret = new String[v.length];
for (int i = 0; i < v.length; i++) {
ret[i] = urlDecode(v[i]);
}
return ret;
});
}
/**
 * Splits a query string on '&amp;' into a sorted key -&gt; parsed-value map. Keys are
 * URL-decoded and lower-cased; values are handed to {@code valueParser}. The first
 * occurrence of a key wins; later duplicates are ignored.
 *
 * @param queryString Query string to parse; null/empty yields an empty map.
 * @param valueParser Converts the raw (still-encoded) value text into the map's value type.
 * @param <T> Parsed value type.
 * @return A {@link TreeMap} of query parameters ordered by key.
 */
private static <T> Map<String, T> parseQueryStringHelper(final String queryString,
Function<String, T> valueParser) {
TreeMap<String, T> pieces = new TreeMap<>();
if (CoreUtils.isNullOrEmpty(queryString)) {
return pieces;
}
for (String kvp : queryString.split("&")) {
// NOTE(review): a piece without '=' leaves equalIndex at -1, so substring(0, -1)
// below throws StringIndexOutOfBoundsException — confirm inputs are always
// well-formed "key=value" pairs.
int equalIndex = kvp.indexOf("=");
String key = urlDecode(kvp.substring(0, equalIndex).toLowerCase(Locale.ROOT));
T value = valueParser.apply(kvp.substring(equalIndex + 1));
// First occurrence of a key wins.
pieces.putIfAbsent(key, value);
}
return pieces;
}
/**
* Blocks an asynchronous response with an optional timeout.
*
* @param response Asynchronous response to block
* @param timeout Optional timeout
* @param <T> Return type of the asynchronous response
* @return the value of the asynchronous response
* @throws RuntimeException If the asynchronous response doesn't complete before the timeout expires.
*/
public static <T> T blockWithOptionalTimeout(Mono<T> response, Duration timeout) {
if (timeout == null) {
return response.block();
} else {
return response.block(timeout);
}
}
/**
* Applies a timeout to a publisher if the given timeout is not null.
*
* @param publisher Mono to apply optional timeout to.
* @param timeout Optional timeout.
* @param <T> Return type of the Mono.
* @return Mono with an applied timeout, if any.
*/
public static <T> Mono<T> applyOptionalTimeout(Mono<T> publisher, Duration timeout) {
return timeout == null
? publisher
: publisher.timeout(timeout);
}
/**
* Applies a timeout to a publisher if the given timeout is not null.
*
* @param publisher Flux to apply optional timeout to.
* @param timeout Optional timeout.
* @param <T> Return type of the Flux.
* @return Flux with an applied timeout, if any.
*/
public static <T> Flux<T> applyOptionalTimeout(Flux<T> publisher, Duration timeout) {
return timeout == null
? publisher
: publisher.timeout(timeout);
}
/**
* Asserts that a value is not {@code null}.
*
* @param param Name of the parameter
* @param value Value of the parameter
* @throws NullPointerException If {@code value} is {@code null}
*/
public static void assertNotNull(final String param, final Object value) {
if (value == null) {
throw new NullPointerException(String.format(Locale.ROOT, ARGUMENT_NULL_OR_EMPTY, param));
}
}
/**
* Asserts that the specified number is in the valid range. The range is inclusive.
*
* @param param Name of the parameter
* @param value Value of the parameter
* @param min The minimum allowed value
* @param max The maximum allowed value
* @throws IllegalArgumentException If {@code value} is less than {@code min} or {@code value} is greater than
* {@code max}.
*/
public static void assertInBounds(final String param, final long value, final long min, final long max) {
if (value < min || value > max) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(Locale.ROOT,
PARAMETER_NOT_IN_RANGE, param, min, max)));
}
}
/**
* Computes a signature for the specified string using the HMAC-SHA256 algorithm.
*
* @param base64Key Base64 encoded key used to sign the string
* @param stringToSign UTF-8 encoded string to sign
* @return the HMAC-SHA256 encoded signature
* @throws RuntimeException If the HMAC-SHA256 algorithm isn't support, if the key isn't a valid Base64 encoded
* string, or the UTF-8 charset isn't supported.
*/
public static String computeHMac256(final String base64Key, final String stringToSign) {
try {
byte[] key = Base64.getDecoder().decode(base64Key);
Mac hmacSHA256 = Mac.getInstance("HmacSHA256");
hmacSHA256.init(new SecretKeySpec(key, "HmacSHA256"));
byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8);
return Base64.getEncoder().encodeToString(hmacSHA256.doFinal(utf8Bytes));
} catch (NoSuchAlgorithmException | InvalidKeyException ex) {
throw new RuntimeException(ex);
}
}
/**
* Appends a string to the end of the passed URL's path.
*
* @param baseURL URL having a path appended
* @param name Name of the path
* @return a URL with the path appended.
* @throws IllegalArgumentException If {@code name} causes the URL to become malformed.
*/
public static URL appendToUrlPath(String baseURL, String name) {
UrlBuilder builder = UrlBuilder.parse(baseURL);
if (builder.getPath() == null) {
builder.setPath("/");
} else if (!builder.getPath().endsWith("/")) {
builder.setPath(builder.getPath() + "/");
}
builder.setPath(builder.getPath() + name);
try {
return builder.toUrl();
} catch (MalformedURLException ex) {
throw new IllegalArgumentException(ex);
}
}
/**
* Strips the last path segment from the passed URL.
*
* @param baseUrl URL having its last path segment stripped
* @return a URL with the path segment stripped.
* @throws IllegalArgumentException If stripping the last path segment causes the URL to become malformed or it
* doesn't contain any path segments.
*/
public static URL stripLastPathSegment(URL baseUrl) {
UrlBuilder builder = UrlBuilder.parse(baseUrl);
if (builder.getPath() == null || !builder.getPath().contains("/")) {
throw new IllegalArgumentException(String.format(Locale.ROOT, NO_PATH_SEGMENTS, baseUrl));
}
builder.setPath(builder.getPath().substring(0, builder.getPath().lastIndexOf("/")));
try {
return builder.toUrl();
} catch (MalformedURLException ex) {
throw new IllegalArgumentException(ex);
}
}
/**
* Strips the account name from host part of the URL object.
*
* @param url URL having its hostanme
* @return account name.
*/
public static String getAccountName(URL url) {
UrlBuilder builder = UrlBuilder.parse(url);
String accountName = null;
String host = builder.getHost();
if (!CoreUtils.isNullOrEmpty(host)) {
int accountNameIndex = host.indexOf('.');
if (accountNameIndex == -1) {
accountName = host;
} else {
accountName = host.substring(0, accountNameIndex);
}
}
return accountName;
}
/** Returns an empty string if value is {@code null}, otherwise returns value
* @param value The value to check and return.
* @return The value or empty string.
*/
public static String emptyIfNull(String value) {
return value == null ? "" : value;
}
/**
* Reads data from an input stream and writes it to an output stream.
* @param source {@link InputStream source}
* @param writeLength The length of data to write.
* @param destination {@link OutputStream destination}
* @throws IOException If an I/O error occurs.
*/
/**
 * Copies bytes from {@code source} to {@code destination} in buffered chunks,
 * stopping at end-of-stream.
 *
 * @param source {@link InputStream source}
 * @param writeLength The length of data to write.
 * @param destination {@link OutputStream destination}
 * @throws IOException If an I/O error occurs.
 */
public static void copyToOutputStream(InputStream source, long writeLength, OutputStream destination)
throws IOException {
StorageImplUtils.assertNotNull("source", source);
StorageImplUtils.assertNotNull("destination", destination);
final byte[] retrievedBuff = new byte[Constants.BUFFER_COPY_LENGTH];
// NOTE(review): writeLength is never decremented, so it caps each chunk but not
// the total copied; the loop runs until end-of-stream. Confirm callers always
// pass the full stream length as writeLength.
int nextCopy = (int) Math.min(retrievedBuff.length, writeLength);
int count = source.read(retrievedBuff, 0, nextCopy);
while (nextCopy > 0 && count != -1) {
destination.write(retrievedBuff, 0, count);
nextCopy = (int) Math.min(retrievedBuff.length, writeLength);
count = source.read(retrievedBuff, 0, nextCopy);
}
}
/**
* Logs the string to sign if a valid context is provided.
*
* @param logger {@link ClientLogger}
* @param stringToSign The string to sign to log.
* @param context Additional context to determine if the string to sign should be logged.
*/
public static void logStringToSign(ClientLogger logger, String stringToSign, Context context) {
// Only log when the caller explicitly opted in via the context flag.
if (context != null && Boolean.TRUE.equals(context.getData(Constants.STORAGE_LOG_STRING_TO_SIGN).orElse(false))) {
logger.info(STRING_TO_SIGN_LOG_INFO_MESSAGE, stringToSign, System.lineSeparator());
// Warn every time: the string to sign can contain PII and must be disabled in production.
logger.warning(STRING_TO_SIGN_LOG_WARNING_MESSAGE, Constants.STORAGE_LOG_STRING_TO_SIGN);
}
}
/**
* Converts the storage exception message.
*
* @param message The storage exception message
* @param response The storage service response.
* @return The converted storage exception message.
*/
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.