comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
https://netty.io/4.0/api/io/netty/handler/codec/http/HttpResponseStatus.html#OK
void deleteQueue() { String queueName = testResourceNamer.randomName("test", 7); QueueDescription description = new QueueDescription().setMaxDeliveryCount(15); CreateQueueBody createEntity = new CreateQueueBody(); CreateQueueBodyContent content = new CreateQueueBodyContent() .setType("application/xml") .setQueueDescription(description); createEntity.setContent(content); logger.info("Creating queue: {}", queueName); Response<Object> response = queuesClient.putWithResponseAsync(queueName, createEntity, null, Context.NONE) .block(timeout); assertNotNull(response); StepVerifier.create(queuesClient.deleteWithResponseAsync(queueName, Context.NONE)) .assertNext(deletedResponse -> assertEquals(200, deletedResponse.getStatusCode())) .verifyComplete(); }
assertNotNull(response);
void deleteQueue() { String queueName = testResourceNamer.randomName("test", 7); QueueDescription description = new QueueDescription().setMaxDeliveryCount(15); CreateQueueBody createEntity = new CreateQueueBody(); CreateQueueBodyContent content = new CreateQueueBodyContent() .setType("application/xml") .setQueueDescription(description); createEntity.setContent(content); logger.info("Creating queue: {}", queueName); Response<Object> response = queuesClient.putWithResponseAsync(queueName, createEntity, null, Context.NONE) .block(timeout); assertNotNull(response); StepVerifier.create(queuesClient.deleteWithResponseAsync(queueName, Context.NONE)) .assertNext(deletedResponse -> { assertEquals(200, deletedResponse.getStatusCode()); }) .verifyComplete(); }
class ServiceBusManagementClientImplIntegrationTests extends TestBase { private final ClientLogger logger = new ClientLogger(ServiceBusManagementClientImplIntegrationTests.class); private final ServiceBusManagementSerializer serializer = new ServiceBusManagementSerializer(); private final Duration timeout = Duration.ofSeconds(30); private QueuesImpl queuesClient; private ServiceBusManagementClientImpl managementClient; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @Override protected void beforeTest() { Assumptions.assumeTrue(getTestMode() != TestMode.PLAYBACK, "Current record/playback does not support persisting XML calls."); final ConnectionStringProperties properties = new ConnectionStringProperties(TestUtils.getConnectionString()); final ServiceBusSharedKeyCredential credential = new ServiceBusSharedKeyCredential( properties.getSharedAccessKeyName(), properties.getSharedAccessKey()); HttpPipeline pipeline = new HttpPipelineBuilder().policies( new UserAgentPolicy(), (context, next) -> { final String url = context.getHttpRequest().getUrl().toString(); return credential.getToken(new TokenRequestContext().addScopes(url)).flatMap(token -> { context.getHttpRequest().getHeaders().put("Authorization", token.getToken()); return next.process(); }); }, new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS) .addAllowedQueryParamName("api-version")), new RetryPolicy() ).build(); managementClient = new ServiceBusManagementClientImplBuilder() .serializer(serializer) .endpoint(properties.getEndpoint().getHost()) .apiVersion("2017-04") .pipeline(pipeline) .buildClient(); queuesClient = managementClient.getQueues(); } /** * Verifies we can get queue information. 
*/ @Test void getQueue() { String queueName = TestUtils.getQueueName(); StepVerifier.create(queuesClient.getWithResponseAsync(queueName, true, Context.NONE)) .assertNext(response -> { final QueueDescriptionResponse deserialize = deserialize(response, QueueDescriptionResponse.class); assertNotNull(deserialize); assertNotNull(deserialize.getContent()); final QueueDescription properties = deserialize.getContent().getQueueDescription(); assertNotNull(properties); assertFalse(properties.getLockDuration().isZero()); }) .verifyComplete(); } /** * Verifies we can create a queue. */ @Test void createQueue() { String queueName = testResourceNamer.randomName("test", 7); QueueDescription description = new QueueDescription().setMaxDeliveryCount(15); CreateQueueBody createEntity = new CreateQueueBody(); CreateQueueBodyContent content = new CreateQueueBodyContent() .setType("application/xml") .setQueueDescription(description); createEntity.setContent(content); logger.info("Creating queue: {}", queueName); StepVerifier.create(queuesClient.putWithResponseAsync(queueName, createEntity, null, Context.NONE)) .assertNext(response -> { Object body = response.getValue(); QueueDescription deserialize = null; try { deserialize = new ServiceBusManagementSerializer() .deserialize(String.valueOf(body), QueueDescription.class); } catch (IOException e) { fail("An exception was thrown. " + e); } assertNotNull(deserialize); }) .verifyComplete(); } /** * Verifies we can delete a queue. */ @Test /** * Verifies that we can edit properties on an existing queue. 
*/ @Test void editQueue() { final String queueName = "q-5"; final Response<Object> response = queuesClient.getWithResponseAsync(queueName, true, Context.NONE) .block(Duration.ofSeconds(30)); assertNotNull(response); final QueueDescriptionResponse deserialize = deserialize(response, QueueDescriptionResponse.class); final QueueDescription properties = deserialize.getContent().getQueueDescription(); final int maxDeliveryCount = properties.getMaxDeliveryCount(); final int newDeliveryCount = maxDeliveryCount + 5; final Duration lockDuration = properties.getLockDuration(); final Duration newLockDuration = lockDuration.plusSeconds(40); final Duration autoDeleteOnIdle = Duration.ofDays(5); properties.setMaxDeliveryCount(newDeliveryCount); properties.setLockDuration(newLockDuration); properties.setAutoDeleteOnIdle(autoDeleteOnIdle); CreateQueueBody updated = new CreateQueueBody().setContent( new CreateQueueBodyContent().setQueueDescription(properties).setType("application/xml")); StepVerifier.create(queuesClient.putWithResponseAsync(queueName, updated, "*", Context.NONE)) .assertNext(update -> { final QueueDescriptionResponse updatedProperties = deserialize(update, QueueDescriptionResponse.class); assertNotNull(updatedProperties); }).verifyComplete(); } /** * Verifies we can list queues. */ @Test void listQueues() { String entityType = "queues"; StepVerifier.create(managementClient.listEntitiesWithResponseAsync(entityType, 0, 100, Context.NONE)) .assertNext(response -> { Object body = response.getValue(); QueueDescriptionFeed deserialize = null; try { deserialize = new ServiceBusManagementSerializer() .deserialize(String.valueOf(body), QueueDescriptionFeed.class); } catch (IOException e) { fail("An exception was thrown. 
" + e); } assertNotNull(deserialize); assertNotNull(deserialize.getEntry()); assertTrue(deserialize.getEntry().size() > 2); }) .verifyComplete(); } private <T> T deserialize(Response<Object> response, Class<T> clazz) { final Object body = response.getValue(); final String contents = String.valueOf(body); final T deserialize; try { deserialize = serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } if (deserialize == null) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "'deserialize' should not be null. Body: [%s]. Class: [%s]", contents, clazz))); } return deserialize; } }
class ServiceBusManagementClientImplIntegrationTests extends TestBase { private final ClientLogger logger = new ClientLogger(ServiceBusManagementClientImplIntegrationTests.class); private final ServiceBusManagementSerializer serializer = new ServiceBusManagementSerializer(); private final Duration timeout = Duration.ofSeconds(30); private QueuesImpl queuesClient; private ServiceBusManagementClientImpl managementClient; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @Override protected void beforeTest() { Assumptions.assumeTrue(getTestMode() != TestMode.PLAYBACK, "Current record/playback does not support persisting XML calls."); ConnectionStringProperties properties = new ConnectionStringProperties(TestUtils.getConnectionString()); ServiceBusSharedKeyCredential credential = new ServiceBusSharedKeyCredential( properties.getSharedAccessKeyName(), properties.getSharedAccessKey()); HttpPipeline pipeline = new HttpPipelineBuilder().policies( new UserAgentPolicy(), new ServiceBusTokenCredentialHttpPolicy(credential), new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS) .addAllowedQueryParamName("api-version")), new RetryPolicy() ).build(); managementClient = new ServiceBusManagementClientImplBuilder() .serializer(serializer) .endpoint(properties.getEndpoint().getHost()) .apiVersion("2017-04") .pipeline(pipeline) .buildClient(); queuesClient = managementClient.getQueues(); } /** * Verifies we can get queue information. 
*/ @Test void getQueue() { String queueName = TestUtils.getQueueName(); StepVerifier.create(queuesClient.getWithResponseAsync(queueName, true, Context.NONE)) .assertNext(response -> { final QueueDescriptionResponse deserialize = deserialize(response, QueueDescriptionResponse.class); assertNotNull(deserialize); assertNotNull(deserialize.getContent()); final QueueDescription properties = deserialize.getContent().getQueueDescription(); assertNotNull(properties); assertFalse(properties.getLockDuration().isZero()); }) .verifyComplete(); } /** * Verifies we can create a queue. */ @Test void createQueue() { String queueName = testResourceNamer.randomName("test", 7); QueueDescription description = new QueueDescription().setMaxDeliveryCount(15); CreateQueueBody createEntity = new CreateQueueBody(); CreateQueueBodyContent content = new CreateQueueBodyContent() .setType("application/xml") .setQueueDescription(description); createEntity.setContent(content); logger.info("Creating queue: {}", queueName); StepVerifier.create(queuesClient.putWithResponseAsync(queueName, createEntity, null, Context.NONE)) .assertNext(response -> { Object body = response.getValue(); QueueDescription deserialize = null; try { deserialize = new ServiceBusManagementSerializer() .deserialize(String.valueOf(body), QueueDescription.class); } catch (IOException e) { fail("An exception was thrown. " + e); } assertNotNull(deserialize); }) .verifyComplete(); } /** * Verifies we can delete a queue. */ @Test /** * Verifies that we can edit properties on an existing queue. 
*/ @Test void editQueue() { final String queueName = "q-5"; final Response<Object> response = queuesClient.getWithResponseAsync(queueName, true, Context.NONE) .block(Duration.ofSeconds(30)); assertNotNull(response); final QueueDescriptionResponse deserialize = deserialize(response, QueueDescriptionResponse.class); final QueueDescription properties = deserialize.getContent().getQueueDescription(); final int maxDeliveryCount = properties.getMaxDeliveryCount(); final int newDeliveryCount = maxDeliveryCount + 5; final Duration lockDuration = properties.getLockDuration(); final Duration newLockDuration = lockDuration.plusSeconds(40); final Duration autoDeleteOnIdle = Duration.ofDays(5); properties.setMaxDeliveryCount(newDeliveryCount); properties.setLockDuration(newLockDuration); properties.setAutoDeleteOnIdle(autoDeleteOnIdle); CreateQueueBody updated = new CreateQueueBody().setContent( new CreateQueueBodyContent().setQueueDescription(properties).setType("application/xml")); StepVerifier.create(queuesClient.putWithResponseAsync(queueName, updated, "*", Context.NONE)) .assertNext(update -> { final QueueDescriptionResponse updatedProperties = deserialize(update, QueueDescriptionResponse.class); assertNotNull(updatedProperties); }).verifyComplete(); } /** * Verifies we can list queues. */ @Test void listQueues() { String entityType = "queues"; StepVerifier.create(managementClient.listEntitiesWithResponseAsync(entityType, 0, 100, Context.NONE)) .assertNext(response -> { Object body = response.getValue(); QueueDescriptionFeed deserialize = null; try { deserialize = new ServiceBusManagementSerializer() .deserialize(String.valueOf(body), QueueDescriptionFeed.class); } catch (IOException e) { fail("An exception was thrown. 
" + e); } assertNotNull(deserialize); assertNotNull(deserialize.getEntry()); assertTrue(deserialize.getEntry().size() > 2); }) .verifyComplete(); } private <T> T deserialize(Response<Object> response, Class<T> clazz) { final Object body = response.getValue(); final String contents = String.valueOf(body); final T deserialize; try { deserialize = serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } if (deserialize == null) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "'deserialize' should not be null. Body: [%s]. Class: [%s]", contents, clazz))); } return deserialize; } }
Same comment as above. You can explicitly exclude dependencies and this would tie them to netty instead of another http client.
void deleteQueue() { String queueName = testResourceNamer.randomName("test", 7); QueueDescription description = new QueueDescription().setMaxDeliveryCount(15); CreateQueueBody createEntity = new CreateQueueBody(); CreateQueueBodyContent content = new CreateQueueBodyContent() .setType("application/xml") .setQueueDescription(description); createEntity.setContent(content); logger.info("Creating queue: {}", queueName); Response<Object> response = queuesClient.putWithResponseAsync(queueName, createEntity, null, Context.NONE) .block(timeout); assertNotNull(response); StepVerifier.create(queuesClient.deleteWithResponseAsync(queueName, Context.NONE)) .assertNext(deletedResponse -> assertEquals(200, deletedResponse.getStatusCode())) .verifyComplete(); }
assertNotNull(response);
void deleteQueue() { String queueName = testResourceNamer.randomName("test", 7); QueueDescription description = new QueueDescription().setMaxDeliveryCount(15); CreateQueueBody createEntity = new CreateQueueBody(); CreateQueueBodyContent content = new CreateQueueBodyContent() .setType("application/xml") .setQueueDescription(description); createEntity.setContent(content); logger.info("Creating queue: {}", queueName); Response<Object> response = queuesClient.putWithResponseAsync(queueName, createEntity, null, Context.NONE) .block(timeout); assertNotNull(response); StepVerifier.create(queuesClient.deleteWithResponseAsync(queueName, Context.NONE)) .assertNext(deletedResponse -> { assertEquals(200, deletedResponse.getStatusCode()); }) .verifyComplete(); }
class ServiceBusManagementClientImplIntegrationTests extends TestBase { private final ClientLogger logger = new ClientLogger(ServiceBusManagementClientImplIntegrationTests.class); private final ServiceBusManagementSerializer serializer = new ServiceBusManagementSerializer(); private final Duration timeout = Duration.ofSeconds(30); private QueuesImpl queuesClient; private ServiceBusManagementClientImpl managementClient; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @Override protected void beforeTest() { Assumptions.assumeTrue(getTestMode() != TestMode.PLAYBACK, "Current record/playback does not support persisting XML calls."); final ConnectionStringProperties properties = new ConnectionStringProperties(TestUtils.getConnectionString()); final ServiceBusSharedKeyCredential credential = new ServiceBusSharedKeyCredential( properties.getSharedAccessKeyName(), properties.getSharedAccessKey()); HttpPipeline pipeline = new HttpPipelineBuilder().policies( new UserAgentPolicy(), (context, next) -> { final String url = context.getHttpRequest().getUrl().toString(); return credential.getToken(new TokenRequestContext().addScopes(url)).flatMap(token -> { context.getHttpRequest().getHeaders().put("Authorization", token.getToken()); return next.process(); }); }, new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS) .addAllowedQueryParamName("api-version")), new RetryPolicy() ).build(); managementClient = new ServiceBusManagementClientImplBuilder() .serializer(serializer) .endpoint(properties.getEndpoint().getHost()) .apiVersion("2017-04") .pipeline(pipeline) .buildClient(); queuesClient = managementClient.getQueues(); } /** * Verifies we can get queue information. 
*/ @Test void getQueue() { String queueName = TestUtils.getQueueName(); StepVerifier.create(queuesClient.getWithResponseAsync(queueName, true, Context.NONE)) .assertNext(response -> { final QueueDescriptionResponse deserialize = deserialize(response, QueueDescriptionResponse.class); assertNotNull(deserialize); assertNotNull(deserialize.getContent()); final QueueDescription properties = deserialize.getContent().getQueueDescription(); assertNotNull(properties); assertFalse(properties.getLockDuration().isZero()); }) .verifyComplete(); } /** * Verifies we can create a queue. */ @Test void createQueue() { String queueName = testResourceNamer.randomName("test", 7); QueueDescription description = new QueueDescription().setMaxDeliveryCount(15); CreateQueueBody createEntity = new CreateQueueBody(); CreateQueueBodyContent content = new CreateQueueBodyContent() .setType("application/xml") .setQueueDescription(description); createEntity.setContent(content); logger.info("Creating queue: {}", queueName); StepVerifier.create(queuesClient.putWithResponseAsync(queueName, createEntity, null, Context.NONE)) .assertNext(response -> { Object body = response.getValue(); QueueDescription deserialize = null; try { deserialize = new ServiceBusManagementSerializer() .deserialize(String.valueOf(body), QueueDescription.class); } catch (IOException e) { fail("An exception was thrown. " + e); } assertNotNull(deserialize); }) .verifyComplete(); } /** * Verifies we can delete a queue. */ @Test /** * Verifies that we can edit properties on an existing queue. 
*/ @Test void editQueue() { final String queueName = "q-5"; final Response<Object> response = queuesClient.getWithResponseAsync(queueName, true, Context.NONE) .block(Duration.ofSeconds(30)); assertNotNull(response); final QueueDescriptionResponse deserialize = deserialize(response, QueueDescriptionResponse.class); final QueueDescription properties = deserialize.getContent().getQueueDescription(); final int maxDeliveryCount = properties.getMaxDeliveryCount(); final int newDeliveryCount = maxDeliveryCount + 5; final Duration lockDuration = properties.getLockDuration(); final Duration newLockDuration = lockDuration.plusSeconds(40); final Duration autoDeleteOnIdle = Duration.ofDays(5); properties.setMaxDeliveryCount(newDeliveryCount); properties.setLockDuration(newLockDuration); properties.setAutoDeleteOnIdle(autoDeleteOnIdle); CreateQueueBody updated = new CreateQueueBody().setContent( new CreateQueueBodyContent().setQueueDescription(properties).setType("application/xml")); StepVerifier.create(queuesClient.putWithResponseAsync(queueName, updated, "*", Context.NONE)) .assertNext(update -> { final QueueDescriptionResponse updatedProperties = deserialize(update, QueueDescriptionResponse.class); assertNotNull(updatedProperties); }).verifyComplete(); } /** * Verifies we can list queues. */ @Test void listQueues() { String entityType = "queues"; StepVerifier.create(managementClient.listEntitiesWithResponseAsync(entityType, 0, 100, Context.NONE)) .assertNext(response -> { Object body = response.getValue(); QueueDescriptionFeed deserialize = null; try { deserialize = new ServiceBusManagementSerializer() .deserialize(String.valueOf(body), QueueDescriptionFeed.class); } catch (IOException e) { fail("An exception was thrown. 
" + e); } assertNotNull(deserialize); assertNotNull(deserialize.getEntry()); assertTrue(deserialize.getEntry().size() > 2); }) .verifyComplete(); } private <T> T deserialize(Response<Object> response, Class<T> clazz) { final Object body = response.getValue(); final String contents = String.valueOf(body); final T deserialize; try { deserialize = serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } if (deserialize == null) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "'deserialize' should not be null. Body: [%s]. Class: [%s]", contents, clazz))); } return deserialize; } }
class ServiceBusManagementClientImplIntegrationTests extends TestBase { private final ClientLogger logger = new ClientLogger(ServiceBusManagementClientImplIntegrationTests.class); private final ServiceBusManagementSerializer serializer = new ServiceBusManagementSerializer(); private final Duration timeout = Duration.ofSeconds(30); private QueuesImpl queuesClient; private ServiceBusManagementClientImpl managementClient; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @Override protected void beforeTest() { Assumptions.assumeTrue(getTestMode() != TestMode.PLAYBACK, "Current record/playback does not support persisting XML calls."); ConnectionStringProperties properties = new ConnectionStringProperties(TestUtils.getConnectionString()); ServiceBusSharedKeyCredential credential = new ServiceBusSharedKeyCredential( properties.getSharedAccessKeyName(), properties.getSharedAccessKey()); HttpPipeline pipeline = new HttpPipelineBuilder().policies( new UserAgentPolicy(), new ServiceBusTokenCredentialHttpPolicy(credential), new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS) .addAllowedQueryParamName("api-version")), new RetryPolicy() ).build(); managementClient = new ServiceBusManagementClientImplBuilder() .serializer(serializer) .endpoint(properties.getEndpoint().getHost()) .apiVersion("2017-04") .pipeline(pipeline) .buildClient(); queuesClient = managementClient.getQueues(); } /** * Verifies we can get queue information. 
*/ @Test void getQueue() { String queueName = TestUtils.getQueueName(); StepVerifier.create(queuesClient.getWithResponseAsync(queueName, true, Context.NONE)) .assertNext(response -> { final QueueDescriptionResponse deserialize = deserialize(response, QueueDescriptionResponse.class); assertNotNull(deserialize); assertNotNull(deserialize.getContent()); final QueueDescription properties = deserialize.getContent().getQueueDescription(); assertNotNull(properties); assertFalse(properties.getLockDuration().isZero()); }) .verifyComplete(); } /** * Verifies we can create a queue. */ @Test void createQueue() { String queueName = testResourceNamer.randomName("test", 7); QueueDescription description = new QueueDescription().setMaxDeliveryCount(15); CreateQueueBody createEntity = new CreateQueueBody(); CreateQueueBodyContent content = new CreateQueueBodyContent() .setType("application/xml") .setQueueDescription(description); createEntity.setContent(content); logger.info("Creating queue: {}", queueName); StepVerifier.create(queuesClient.putWithResponseAsync(queueName, createEntity, null, Context.NONE)) .assertNext(response -> { Object body = response.getValue(); QueueDescription deserialize = null; try { deserialize = new ServiceBusManagementSerializer() .deserialize(String.valueOf(body), QueueDescription.class); } catch (IOException e) { fail("An exception was thrown. " + e); } assertNotNull(deserialize); }) .verifyComplete(); } /** * Verifies we can delete a queue. */ @Test /** * Verifies that we can edit properties on an existing queue. 
*/ @Test void editQueue() { final String queueName = "q-5"; final Response<Object> response = queuesClient.getWithResponseAsync(queueName, true, Context.NONE) .block(Duration.ofSeconds(30)); assertNotNull(response); final QueueDescriptionResponse deserialize = deserialize(response, QueueDescriptionResponse.class); final QueueDescription properties = deserialize.getContent().getQueueDescription(); final int maxDeliveryCount = properties.getMaxDeliveryCount(); final int newDeliveryCount = maxDeliveryCount + 5; final Duration lockDuration = properties.getLockDuration(); final Duration newLockDuration = lockDuration.plusSeconds(40); final Duration autoDeleteOnIdle = Duration.ofDays(5); properties.setMaxDeliveryCount(newDeliveryCount); properties.setLockDuration(newLockDuration); properties.setAutoDeleteOnIdle(autoDeleteOnIdle); CreateQueueBody updated = new CreateQueueBody().setContent( new CreateQueueBodyContent().setQueueDescription(properties).setType("application/xml")); StepVerifier.create(queuesClient.putWithResponseAsync(queueName, updated, "*", Context.NONE)) .assertNext(update -> { final QueueDescriptionResponse updatedProperties = deserialize(update, QueueDescriptionResponse.class); assertNotNull(updatedProperties); }).verifyComplete(); } /** * Verifies we can list queues. */ @Test void listQueues() { String entityType = "queues"; StepVerifier.create(managementClient.listEntitiesWithResponseAsync(entityType, 0, 100, Context.NONE)) .assertNext(response -> { Object body = response.getValue(); QueueDescriptionFeed deserialize = null; try { deserialize = new ServiceBusManagementSerializer() .deserialize(String.valueOf(body), QueueDescriptionFeed.class); } catch (IOException e) { fail("An exception was thrown. 
" + e); } assertNotNull(deserialize); assertNotNull(deserialize.getEntry()); assertTrue(deserialize.getEntry().size() > 2); }) .verifyComplete(); } private <T> T deserialize(Response<Object> response, Class<T> clazz) { final Object body = response.getValue(); final String contents = String.valueOf(body); final T deserialize; try { deserialize = serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } if (deserialize == null) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "'deserialize' should not be null. Body: [%s]. Class: [%s]", contents, clazz))); } return deserialize; } }
`queueDescription` as arg name will be more appropriate ?
public QueueDescription createQueue(QueueDescription queue) { return asyncClient.createQueue(queue).block(); }
return asyncClient.createQueue(queue).block();
public QueueDescription createQueue(QueueDescription queue) { return asyncClient.createQueue(queue).block(); }
class ServiceBusManagementClient { private final ServiceBusManagementAsyncClient asyncClient; /** * Creates a new instance with the given client. * * @param asyncClient Asynchronous client to perform management calls through. */ ServiceBusManagementClient(ServiceBusManagementAsyncClient asyncClient) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); } /** * Creates a queue the {@link QueueDescription}. * * @param queue Information about the queue to create. * * @return The created queue. * @throws NullPointerException if {@code queue} is null. * @throws IllegalArgumentException if {@link QueueDescription * string. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queue The queue to create. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The created queue in addition to the HTTP response. * @throws NullPointerException if {@code queue} is null. * @throws IllegalArgumentException if {@link QueueDescription * string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueDescription> createQueueWithResponse(QueueDescription queue, Context context) { return asyncClient.createQueueWithResponse(queue, context).block(); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public void deleteQueue(String queueName) { asyncClient.deleteQueue(queueName).block(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * @param context Additional context that is passed through the HTTP pipeline during the service call. 
* * @return The HTTP response when the queue is successfully deleted. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> deleteQueueWithResponse(String queueName, Context context) { return asyncClient.deleteQueueWithResponse(queueName, context).block(); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return Information about the queue. * @throws NullPointerException if {@code queueName} is null or an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueDescription getQueue(String queueName) { return asyncClient.getQueue(queueName).block(); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws NullPointerException if {@code queueName} is null or an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueDescription> getQueueWithResponse(String queueName, Context context) { return asyncClient.getQueueWithResponse(queueName, context).block(); } /** * Gets runtime information about the queue. * * @param queueName Name of queue to get information about. * * @return Runtime information about the queue. * @throws NullPointerException if {@code queueName} is null or an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueRuntimeInfo getQueueRuntimeInfo(String queueName) { return asyncClient.getQueueRuntimeInfo(queueName).block(); } /** * Gets runtime information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. 
* * @return Runtime information about the queue and the associated HTTP response. * @throws NullPointerException if {@code queueName} is null or an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueRuntimeInfo> getQueueRuntimeInfoWithResponse(String queueName, Context context) { return asyncClient.getQueueRuntimeInfoWithResponse(queueName, context).block(); } /** * Fetches all the queues in the Service Bus namespace. * * @return A PagedIterable of {@link QueueDescription queues} in the Service Bus namespace. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<QueueDescription> listQueues() { return new PagedIterable<>(asyncClient.listQueues()); } /** * Fetches all the queues in the Service Bus namespace. * * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A PagedIterable of {@link QueueDescription queues} in the Service Bus namespace. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<QueueDescription> listQueues(Context context) { final PagedFlux<QueueDescription> pagedFlux = new PagedFlux<>( () -> asyncClient.listQueuesFirstPage(context), continuationToken -> asyncClient.listQueuesNextPage(continuationToken, context)); return new PagedIterable<>(pagedFlux); } /** * Creates a queue the {@link QueueDescription}. * * @param queue Information about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws NullPointerException if {@code queue} is null. * @throws IllegalArgumentException if {@link QueueDescription * string. */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueDescription updateQueue(QueueDescription queue) { return asyncClient.updateQueue(queue).block(); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queue The queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. 
* @throws NullPointerException if {@code queue} is null. * @throws IllegalArgumentException if {@link QueueDescription * string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueDescription> updateQueueWithResponse(QueueDescription queue, Context context) { return asyncClient.updateQueueWithResponse(queue, context).block(); } }
/**
 * A <b>synchronous</b> client for managing queues in a Service Bus namespace. Every operation
 * delegates to a {@link ServiceBusManagementAsyncClient} and blocks until the service call
 * completes.
 */
class ServiceBusManagementClient {
    private final ServiceBusManagementAsyncClient asyncClient;

    /**
     * Creates a new instance with the given client.
     *
     * @param asyncClient Asynchronous client to perform management calls through.
     * @throws NullPointerException if {@code asyncClient} is null.
     */
    ServiceBusManagementClient(ServiceBusManagementAsyncClient asyncClient) {
        this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
    }

    /**
     * Creates a queue with the given {@link QueueDescription}.
     *
     * @param queue Information about the queue to create.
     * @return The created queue.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueDescription createQueue(QueueDescription queue) {
        return asyncClient.createQueue(queue).block();
    }

    /**
     * Creates a queue and returns the created queue in addition to the HTTP response.
     *
     * @param queue The queue to create.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The created queue in addition to the HTTP response.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueDescription> createQueueWithResponse(QueueDescription queue, Context context) {
        return asyncClient.createQueueWithResponse(queue, context).block();
    }

    /**
     * Deletes a queue matching {@code queueName}.
     *
     * @param queueName Name of queue to delete.
     * @throws NullPointerException if {@code queueName} is null.
     * @throws IllegalArgumentException if {@code queueName} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteQueue(String queueName) {
        asyncClient.deleteQueue(queueName).block();
    }

    /**
     * Deletes a queue matching {@code queueName} and returns the HTTP response.
     *
     * @param queueName Name of queue to delete.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The HTTP response when the queue is successfully deleted.
     * @throws NullPointerException if {@code queueName} is null.
     * @throws IllegalArgumentException if {@code queueName} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> deleteQueueWithResponse(String queueName, Context context) {
        return asyncClient.deleteQueueWithResponse(queueName, context).block();
    }

    /**
     * Gets information about the queue.
     *
     * @param queueName Name of queue to get information about.
     * @return Information about the queue.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueDescription getQueue(String queueName) {
        return asyncClient.getQueue(queueName).block();
    }

    /**
     * Gets information about the queue along with its HTTP response.
     *
     * @param queueName Name of queue to get information about.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return Information about the queue and the associated HTTP response.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueDescription> getQueueWithResponse(String queueName, Context context) {
        return asyncClient.getQueueWithResponse(queueName, context).block();
    }

    /**
     * Gets runtime information about the queue.
     *
     * @param queueName Name of queue to get information about.
     * @return Runtime information about the queue.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueRuntimeInfo getQueueRuntimeInfo(String queueName) {
        return asyncClient.getQueueRuntimeInfo(queueName).block();
    }

    /**
     * Gets runtime information about the queue along with its HTTP response.
     *
     * @param queueName Name of queue to get information about.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return Runtime information about the queue and the associated HTTP response.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueRuntimeInfo> getQueueRuntimeInfoWithResponse(String queueName, Context context) {
        return asyncClient.getQueueRuntimeInfoWithResponse(queueName, context).block();
    }

    /**
     * Fetches all the queues in the Service Bus namespace.
     *
     * @return A PagedIterable of {@link QueueDescription queues} in the Service Bus namespace.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<QueueDescription> listQueues() {
        return new PagedIterable<>(asyncClient.listQueues());
    }

    /**
     * Fetches all the queues in the Service Bus namespace.
     *
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return A PagedIterable of {@link QueueDescription queues} in the Service Bus namespace.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<QueueDescription> listQueues(Context context) {
        // Wrap the context-aware paged flux so each page fetch carries the caller's context.
        final PagedFlux<QueueDescription> pagedFlux = new PagedFlux<>(
            () -> asyncClient.listQueuesFirstPage(context),
            continuationToken -> asyncClient.listQueuesNextPage(continuationToken, context));
        return new PagedIterable<>(pagedFlux);
    }

    /**
     * Updates a queue with the given {@link QueueDescription}. The description must be fully
     * populated because all of the queue's properties are replaced; fetch the current description,
     * modify the subset of updatable properties, then pass the result here.
     *
     * @param queue Information about the queue to update.
     * @return The updated queue.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueDescription updateQueue(QueueDescription queue) {
        return asyncClient.updateQueue(queue).block();
    }

    /**
     * Updates a queue with the given {@link QueueDescription} and returns the updated queue in
     * addition to the HTTP response. The description must be fully populated because all of the
     * queue's properties are replaced.
     *
     * @param queue The queue to update.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The updated queue with its HTTP response.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueDescription> updateQueueWithResponse(QueueDescription queue, Context context) {
        return asyncClient.updateQueueWithResponse(queue, context).block();
    }
}
Went with .NET's parameter name.
public QueueDescription createQueue(QueueDescription queue) { return asyncClient.createQueue(queue).block(); }
return asyncClient.createQueue(queue).block();
public QueueDescription createQueue(QueueDescription queue) { return asyncClient.createQueue(queue).block(); }
/**
 * A <b>synchronous</b> client for managing queues in a Service Bus namespace. Every operation
 * delegates to a {@link ServiceBusManagementAsyncClient} and blocks until the service call
 * completes. (Javadoc previously described several blocking methods as returning a Mono; the
 * return documentation below reflects the synchronous contract.)
 */
class ServiceBusManagementClient {
    private final ServiceBusManagementAsyncClient asyncClient;

    /**
     * Creates a new instance with the given client.
     *
     * @param asyncClient Asynchronous client to perform management calls through.
     * @throws NullPointerException if {@code asyncClient} is null.
     */
    ServiceBusManagementClient(ServiceBusManagementAsyncClient asyncClient) {
        this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
    }

    /**
     * Creates a queue with the given {@link QueueDescription}.
     *
     * @param queue Information about the queue to create.
     * @return The created queue.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueDescription createQueue(QueueDescription queue) {
        return asyncClient.createQueue(queue).block();
    }

    /**
     * Creates a queue and returns the created queue in addition to the HTTP response.
     *
     * @param queue The queue to create.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The created queue in addition to the HTTP response.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueDescription> createQueueWithResponse(QueueDescription queue, Context context) {
        return asyncClient.createQueueWithResponse(queue, context).block();
    }

    /**
     * Deletes a queue matching {@code queueName}.
     *
     * @param queueName Name of queue to delete.
     * @throws NullPointerException if {@code queueName} is null.
     * @throws IllegalArgumentException if {@code queueName} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteQueue(String queueName) {
        asyncClient.deleteQueue(queueName).block();
    }

    /**
     * Deletes a queue matching {@code queueName} and returns the HTTP response.
     *
     * @param queueName Name of queue to delete.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The HTTP response when the queue is successfully deleted.
     * @throws NullPointerException if {@code queueName} is null.
     * @throws IllegalArgumentException if {@code queueName} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> deleteQueueWithResponse(String queueName, Context context) {
        return asyncClient.deleteQueueWithResponse(queueName, context).block();
    }

    /**
     * Gets information about the queue.
     *
     * @param queueName Name of queue to get information about.
     * @return Information about the queue.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueDescription getQueue(String queueName) {
        return asyncClient.getQueue(queueName).block();
    }

    /**
     * Gets information about the queue along with its HTTP response.
     *
     * @param queueName Name of queue to get information about.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return Information about the queue and the associated HTTP response.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueDescription> getQueueWithResponse(String queueName, Context context) {
        return asyncClient.getQueueWithResponse(queueName, context).block();
    }

    /**
     * Gets runtime information about the queue.
     *
     * @param queueName Name of queue to get information about.
     * @return Runtime information about the queue.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueRuntimeInfo getQueueRuntimeInfo(String queueName) {
        return asyncClient.getQueueRuntimeInfo(queueName).block();
    }

    /**
     * Gets runtime information about the queue along with its HTTP response.
     *
     * @param queueName Name of queue to get information about.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return Runtime information about the queue and the associated HTTP response.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueRuntimeInfo> getQueueRuntimeInfoWithResponse(String queueName, Context context) {
        return asyncClient.getQueueRuntimeInfoWithResponse(queueName, context).block();
    }

    /**
     * Fetches all the queues in the Service Bus namespace.
     *
     * @return A PagedIterable of {@link QueueDescription queues} in the Service Bus namespace.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<QueueDescription> listQueues() {
        return new PagedIterable<>(asyncClient.listQueues());
    }

    /**
     * Fetches all the queues in the Service Bus namespace.
     *
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return A PagedIterable of {@link QueueDescription queues} in the Service Bus namespace.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<QueueDescription> listQueues(Context context) {
        // Wrap the context-aware paged flux so each page fetch carries the caller's context.
        final PagedFlux<QueueDescription> pagedFlux = new PagedFlux<>(
            () -> asyncClient.listQueuesFirstPage(context),
            continuationToken -> asyncClient.listQueuesNextPage(continuationToken, context));
        return new PagedIterable<>(pagedFlux);
    }

    /**
     * Updates a queue with the given {@link QueueDescription}. The description must be fully
     * populated because all of the queue's properties are replaced; fetch the current description,
     * modify the subset of updatable properties, then pass the result here.
     *
     * @param queue Information about the queue to update.
     * @return The updated queue.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueDescription updateQueue(QueueDescription queue) {
        return asyncClient.updateQueue(queue).block();
    }

    /**
     * Updates a queue with the given {@link QueueDescription} and returns the updated queue in
     * addition to the HTTP response. The description must be fully populated because all of the
     * queue's properties are replaced.
     *
     * @param queue The queue to update.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The updated queue with its HTTP response.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueDescription> updateQueueWithResponse(QueueDescription queue, Context context) {
        return asyncClient.updateQueueWithResponse(queue, context).block();
    }
}
/**
 * A <b>synchronous</b> client for managing queues in a Service Bus namespace. Every operation
 * delegates to a {@link ServiceBusManagementAsyncClient} and blocks until the service call
 * completes.
 */
class ServiceBusManagementClient {
    private final ServiceBusManagementAsyncClient asyncClient;

    /**
     * Creates a new instance with the given client.
     *
     * @param asyncClient Asynchronous client to perform management calls through.
     * @throws NullPointerException if {@code asyncClient} is null.
     */
    ServiceBusManagementClient(ServiceBusManagementAsyncClient asyncClient) {
        this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
    }

    /**
     * Creates a queue with the given {@link QueueDescription}.
     *
     * @param queue Information about the queue to create.
     * @return The created queue.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueDescription createQueue(QueueDescription queue) {
        return asyncClient.createQueue(queue).block();
    }

    /**
     * Creates a queue and returns the created queue in addition to the HTTP response.
     *
     * @param queue The queue to create.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The created queue in addition to the HTTP response.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueDescription> createQueueWithResponse(QueueDescription queue, Context context) {
        return asyncClient.createQueueWithResponse(queue, context).block();
    }

    /**
     * Deletes a queue matching {@code queueName}.
     *
     * @param queueName Name of queue to delete.
     * @throws NullPointerException if {@code queueName} is null.
     * @throws IllegalArgumentException if {@code queueName} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteQueue(String queueName) {
        asyncClient.deleteQueue(queueName).block();
    }

    /**
     * Deletes a queue matching {@code queueName} and returns the HTTP response.
     *
     * @param queueName Name of queue to delete.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The HTTP response when the queue is successfully deleted.
     * @throws NullPointerException if {@code queueName} is null.
     * @throws IllegalArgumentException if {@code queueName} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> deleteQueueWithResponse(String queueName, Context context) {
        return asyncClient.deleteQueueWithResponse(queueName, context).block();
    }

    /**
     * Gets information about the queue.
     *
     * @param queueName Name of queue to get information about.
     * @return Information about the queue.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueDescription getQueue(String queueName) {
        return asyncClient.getQueue(queueName).block();
    }

    /**
     * Gets information about the queue along with its HTTP response.
     *
     * @param queueName Name of queue to get information about.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return Information about the queue and the associated HTTP response.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueDescription> getQueueWithResponse(String queueName, Context context) {
        return asyncClient.getQueueWithResponse(queueName, context).block();
    }

    /**
     * Gets runtime information about the queue.
     *
     * @param queueName Name of queue to get information about.
     * @return Runtime information about the queue.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueRuntimeInfo getQueueRuntimeInfo(String queueName) {
        return asyncClient.getQueueRuntimeInfo(queueName).block();
    }

    /**
     * Gets runtime information about the queue along with its HTTP response.
     *
     * @param queueName Name of queue to get information about.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return Runtime information about the queue and the associated HTTP response.
     * @throws NullPointerException if {@code queueName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueRuntimeInfo> getQueueRuntimeInfoWithResponse(String queueName, Context context) {
        return asyncClient.getQueueRuntimeInfoWithResponse(queueName, context).block();
    }

    /**
     * Fetches all the queues in the Service Bus namespace.
     *
     * @return A PagedIterable of {@link QueueDescription queues} in the Service Bus namespace.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<QueueDescription> listQueues() {
        return new PagedIterable<>(asyncClient.listQueues());
    }

    /**
     * Fetches all the queues in the Service Bus namespace.
     *
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return A PagedIterable of {@link QueueDescription queues} in the Service Bus namespace.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<QueueDescription> listQueues(Context context) {
        // Wrap the context-aware paged flux so each page fetch carries the caller's context.
        final PagedFlux<QueueDescription> pagedFlux = new PagedFlux<>(
            () -> asyncClient.listQueuesFirstPage(context),
            continuationToken -> asyncClient.listQueuesNextPage(continuationToken, context));
        return new PagedIterable<>(pagedFlux);
    }

    /**
     * Updates a queue with the given {@link QueueDescription}. The description must be fully
     * populated because all of the queue's properties are replaced; fetch the current description,
     * modify the subset of updatable properties, then pass the result here.
     *
     * @param queue Information about the queue to update.
     * @return The updated queue.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueDescription updateQueue(QueueDescription queue) {
        return asyncClient.updateQueue(queue).block();
    }

    /**
     * Updates a queue with the given {@link QueueDescription} and returns the updated queue in
     * addition to the HTTP response. The description must be fully populated because all of the
     * queue's properties are replaced.
     *
     * @param queue The queue to update.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The updated queue with its HTTP response.
     * @throws NullPointerException if {@code queue} is null.
     * @throws IllegalArgumentException if the name in {@code queue} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<QueueDescription> updateQueueWithResponse(QueueDescription queue, Context context) {
        return asyncClient.updateQueueWithResponse(queue, context).block();
    }
}
blockSize is a long now, right?
/**
 * Opens an {@link OutputStream} to the given path; the resulting file is stored as a block blob.
 * <p>
 * Defaults to {@code CREATE + WRITE + TRUNCATE_EXISTING} when no options are given. Only
 * {@code CREATE}, {@code CREATE_NEW}, {@code WRITE} and {@code TRUNCATE_EXISTING} are supported;
 * {@code WRITE} and {@code TRUNCATE_EXISTING} are mandatory.
 *
 * @param path the path of the file to write.
 * @param options the options specifying how the file is opened.
 * @return an output stream backed by a block blob upload.
 * @throws IOException if the path is a directory, the target does not exist and no create option
 *     was given, or {@code CREATE_NEW} was given but a file already exists at the path.
 * @throws UnsupportedOperationException if an unsupported open option is given.
 * @throws IllegalArgumentException if WRITE and TRUNCATE_EXISTING are not both specified.
 */
@Override
public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
    // Default options mirror java.nio semantics: create if absent, truncate if present.
    if (options == null || options.length == 0) {
        options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE,
            StandardOpenOption.TRUNCATE_EXISTING };
    }
    List<OpenOption> optionsList = Arrays.asList(options);
    List<OpenOption> supportedOptions = Arrays.asList(
        StandardOpenOption.CREATE_NEW,
        StandardOpenOption.CREATE,
        StandardOpenOption.WRITE,
        StandardOpenOption.TRUNCATE_EXISTING);
    for (OpenOption option : optionsList) {
        if (!supportedOptions.contains(option)) {
            // Consistency: route through LoggingUtility.logError like every other failure
            // path in this method (and newInputStream) so the error is logged before thrown.
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Unsupported option: " + option.toString()));
        }
    }
    if (!optionsList.contains(StandardOpenOption.WRITE)
        || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) {
        throw LoggingUtility.logError(logger, new IllegalArgumentException(
            "Write and TruncateExisting must be specified to open an OutputStream"));
    }

    // Validate the destination's directory status before opening the stream.
    AzureResource resource = new AzureResource(path);
    DirectoryStatus status = resource.checkDirStatus();
    if (DirectoryStatus.isDirectory(status)) {
        throw LoggingUtility.logError(logger,
            new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString()));
    }
    if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE)
        || optionsList.contains(StandardOpenOption.CREATE_NEW))) {
        throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
            + "option. Path: " + path.toString()));
    }
    if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) {
        throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
            + "CREATE_NEW was specified. Path: " + path.toString()));
    }

    // Transfer tuning comes from the file system's configuration.
    // NOTE(review): getBlockSize()/getPutBlobThreshold() appear to return Long; intValue()
    // silently truncates values above Integer.MAX_VALUE — confirm configured sizes stay in range.
    AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
    Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
    Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
    ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(),
        null, putBlobThreshold);

    // CREATE_NEW maps to an If-None-Match: * precondition so an existing blob fails the upload.
    BlobRequestConditions rq = null;
    if (optionsList.contains(StandardOpenOption.CREATE_NEW)) {
        rq = new BlobRequestConditions().setIfNoneMatch("*");
    }

    return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto,
        null, null, null, rq));
}
Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
/**
 * Opens an {@link OutputStream} to the given path; the resulting file is stored as a block blob.
 * Defaults to CREATE + WRITE + TRUNCATE_EXISTING when no options are given; WRITE and
 * TRUNCATE_EXISTING are mandatory, and only CREATE/CREATE_NEW may be added.
 *
 * @param path the path of the file to write.
 * @param options the options specifying how the file is opened.
 * @return an output stream backed by a block blob upload.
 * @throws IOException if the path is a directory, the target does not exist without a create
 *     option, or CREATE_NEW was given but a file already exists at the path.
 */
public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
    // Default options mirror java.nio semantics: create if absent, truncate if present.
    if (options == null || options.length == 0) {
        options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE,
            StandardOpenOption.TRUNCATE_EXISTING };
    }
    List<OpenOption> optionsList = Arrays.asList(options);
    List<OpenOption> supportedOptions = Arrays.asList(
        StandardOpenOption.CREATE_NEW,
        StandardOpenOption.CREATE,
        StandardOpenOption.WRITE,
        StandardOpenOption.TRUNCATE_EXISTING);
    // Reject any option outside the supported set up front.
    for (OpenOption option : optionsList) {
        if (!supportedOptions.contains(option)) {
            throw new UnsupportedOperationException("Unsupported option: " + option.toString());
        }
    }
    // WRITE and TRUNCATE_EXISTING are both required to open an OutputStream.
    if (!optionsList.contains(StandardOpenOption.WRITE)
        || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) {
        throw new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream");
    }
    // Validate the destination's directory status before opening the stream.
    AzureResource resource = new AzureResource(path);
    DirectoryStatus status = resource.checkDirStatus();
    if (DirectoryStatus.isDirectory(status)) {
        throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: "
            + path.toString()));
    }
    if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE)
        || optionsList.contains(StandardOpenOption.CREATE_NEW))) {
        throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
            + "option. Path: " + path.toString()));
    }
    if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) {
        throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
            + "CREATE_NEW was specified. Path: " + path.toString()));
    }
    // Transfer tuning comes from the file system's configuration.
    // NOTE(review): intValue() silently truncates Longs above Integer.MAX_VALUE — confirm
    // configured block sizes / thresholds stay in int range.
    AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
    Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
    Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
    ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(),
        null, putBlobThreshold);
    // CREATE_NEW maps to an If-None-Match: * precondition so an existing blob fails the upload.
    BlobRequestConditions rq = null;
    if (optionsList.contains(StandardOpenOption.CREATE_NEW)) {
        rq = new BlobRequestConditions().setIfNoneMatch("*");
    }
    return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto,
        null, null, null, rq));
}
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); public static final String CONTENT_TYPE = "Content-Type"; public static final String CONTENT_DISPOSITION = "Content-Disposition"; public static final String CONTENT_LANGUAGE = "Content-Language"; public static final String CONTENT_ENCODING = "Content-Encoding"; public static final String CONTENT_MD5 = "Content-MD5"; public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns {@code "azb".} */ @Override public String getScheme() { return "azb"; } /** * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * {@inheritDoc} */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. 
* {@inheritDoc} */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * {@inheritDoc} */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * {@inheritDoc} */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { return null; } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * {@inheritDoc} */ @Override public InputStream newInputStream(Path path, OpenOption... options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. 
{@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * {@inheritDoc} */ @Override /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic. It is possible to delete a file in use by another process, and doing so will not * immediately invalidate any channels open to that file--they will simply start to fail. Root directories cannot be * deleted even when empty. * {@inheritDoc} * * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. 
*/ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic. More specifically, the checks necessary to validate the * inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered, * the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the nio javadocs, this method has the following requirements for successful completion. * {@link StandardCopyOption * if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the * destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be * thrown. The parent directory of the destination must at least weakly exist; if it does not, an * {@link IOException} will be thrown. The only supported option other than * {@link StandardCopyOption * other option will result in an {@link UnsupportedOperationException}. * * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. 
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @see */ @Override public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzureResource destinationRes = new AzureResource(destination); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. 
*/ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. 
*/ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * {@inheritDoc} */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { } /** * {@inheritDoc} */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { return false; } /** * {@inheritDoc} */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * {@inheritDoc} */ @Override public FileStore getFileStore(Path path) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void checkAccess(Path path, AccessMode... accessModes) throws IOException { } /** * {@inheritDoc} */ @Override public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) { return null; } /** * {@inheritDoc} */ @Override public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void setAttribute(Path path, String s, Object o, LinkOption... 
linkOptions) throws IOException { } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); public static final String CONTENT_TYPE = "Content-Type"; public static final String CONTENT_DISPOSITION = "Content-Disposition"; public static final String CONTENT_LANGUAGE = "Content-Language"; public static final String CONTENT_ENCODING = "Content-Encoding"; public static final String CONTENT_MD5 = "Content-MD5"; public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns {@code "azb".} */ @Override public String getScheme() { return "azb"; } /** * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * {@inheritDoc} */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. 
* {@inheritDoc} */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * {@inheritDoc} */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * {@inheritDoc} */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { return null; } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * {@inheritDoc} */ @Override public InputStream newInputStream(Path path, OpenOption... options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. 
{@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * {@inheritDoc} */ @Override /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. 
 * <p>
 * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
 * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
 * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
 * When extracting the content headers, the following strings will be used for comparison (constants for these
 * values can be found on this type):
 * <ul>
 * <li>{@code Content-Type}</li>
 * <li>{@code Content-Disposition}</li>
 * <li>{@code Content-Language}</li>
 * <li>{@code Content-Encoding}</li>
 * <li>{@code Content-MD5}</li>
 * <li>{@code Cache-Control}</li>
 * </ul>
 * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
 * words, if any of the above is set, all those that are not set will be cleared. See the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/setting-and-retrieving-properties-and-metadata-for-blob-resources">Azure Docs</a>
 * for more information.
 * (NOTE(review): the link above was truncated after "https:" in the extracted source; reconstructed from the
 * Azure Storage REST docs — confirm against VCS.)
 *
 * {@inheritDoc}
 * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
 */
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    // Normalize a null varargs array to empty so the attribute handling below never NPEs.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;

    AzureResource azureResource = new AzureResource(path);
    // Only weak (virtual) existence of the parent is required; atomicity of the child creation itself
    // comes from the If-None-Match: "*" precondition on the put below.
    if (azureResource.checkParentDirectoryExists()) {
        try {
            azureResource.setFileAttributes(Arrays.asList(fileAttributes))
                .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*"));
        } catch (BlobStorageException e) {
            // 409 + BlobAlreadyExists means something strongly exists at this path already.
            if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                throw LoggingUtility.logError(logger,
                    new FileAlreadyExistsException(azureResource.getPath().toString()));
            } else {
                throw LoggingUtility.logError(logger,
                    new IOException("An error occurred when creating the directory", e));
            }
        }
    } else {
        throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: "
            + azureResource.getPath().toString()));
    }
}

/**
 * Deletes the specified resource.
 * <p>
 * This method is not atomic. It is possible to delete a file in use by another process, and doing so will not
 * immediately invalidate any channels open to that file--they will simply start to fail. Root directories cannot be
 * deleted even when empty.
 * {@inheritDoc}
 *
 * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
 */
@Override
public void delete(Path path) throws IOException {
    AzureResource azureResource = new AzureResource(path);
    // Resolve the directory status once; it drives all validation below.
    DirectoryStatus dirStatus = azureResource.checkDirStatus();
    if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
        throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
    }
    if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString()));
    }
    try {
        azureResource.getBlobClient().delete();
    } catch (BlobStorageException e) {
        // The blob may have been removed between the status check and this call.
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        }
        throw LoggingUtility.logError(logger, new IOException(e));
    }
}

/**
 * Copies the resource at the source location to the destination.
 * <p>
 * This method is not atomic. More specifically, the checks necessary to validate the
 * inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
 * the copy itself is atomic and only a complete copy will ever be left at the destination.
 * <p>
 * In addition to those in the nio javadocs, this method has the following requirements for successful completion.
 * {@link StandardCopyOption#COPY_ATTRIBUTES} must be passed as the service will always copy file attributes;
 * if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
 * destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
 * thrown. The parent directory of the destination must at least weakly exist; if it does not, an
 * {@link IOException} will be thrown. The only supported option other than
 * {@link StandardCopyOption#COPY_ATTRIBUTES} is {@link StandardCopyOption#REPLACE_EXISTING}; the presence of any
 * other option will result in an {@link UnsupportedOperationException}.
 *
 * This method supports both virtual and concrete directories as both the source and destination. Unlike when
 * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @see */ @Override public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzureResource destinationRes = new AzureResource(destination); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. 
*/ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. 
*/ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * {@inheritDoc} */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { } /** * {@inheritDoc} */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { return false; } /** * {@inheritDoc} */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * {@inheritDoc} */ @Override public FileStore getFileStore(Path path) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void checkAccess(Path path, AccessMode... accessModes) throws IOException { } /** * {@inheritDoc} */ @Override public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) { return null; } /** * {@inheritDoc} */ @Override public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void setAttribute(Path path, String s, Object o, LinkOption... 
linkOptions) throws IOException { } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
Review note: is this duplicated `newOutputStream` definition left over from a bad merge, or is the duplication on purpose due to the constraints described below? Please confirm and remove one copy if unintentional.
/**
 * Opens an {@link OutputStream} to the given path; the resulting file is stored as a block blob.
 * <p>
 * Only CREATE, CREATE_NEW, WRITE and TRUNCATE_EXISTING are supported, and WRITE + TRUNCATE_EXISTING are
 * mandatory, so files are always overwritten completely rather than updated in place. With no options, the
 * conventional CREATE + WRITE + TRUNCATE_EXISTING behavior is used.
 *
 * @throws UnsupportedOperationException if an unsupported option is passed.
 * @throws IllegalArgumentException if WRITE and TRUNCATE_EXISTING are not both present.
 * @throws IOException if the target is a directory, or the creation/overwrite preconditions fail.
 */
public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
    // Default options: overwrite-style write.
    if (options == null || options.length == 0) {
        options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE,
            StandardOpenOption.TRUNCATE_EXISTING };
    }
    List<OpenOption> optionsList = Arrays.asList(options);

    // Only these four options are supported by this provider.
    List<OpenOption> supportedOptions = Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE,
        StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
    for (OpenOption option : optionsList) {
        if (!supportedOptions.contains(option)) {
            // Consistency fix: route through LoggingUtility.logError like every other throw in this type.
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Unsupported option: " + option.toString()));
        }
    }
    if (!optionsList.contains(StandardOpenOption.WRITE)
        || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) {
        // Consistency fix: route through LoggingUtility.logError like every other throw in this type.
        throw LoggingUtility.logError(logger,
            new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream"));
    }

    AzureResource resource = new AzureResource(path);
    DirectoryStatus status = resource.checkDirStatus();

    // Streams may only target files, never directories (virtual or concrete).
    if (DirectoryStatus.isDirectory(status)) {
        throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: "
            + path.toString()));
    }
    if (status.equals(DirectoryStatus.DOES_NOT_EXIST)
        && !(optionsList.contains(StandardOpenOption.CREATE)
            || optionsList.contains(StandardOpenOption.CREATE_NEW))) {
        throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
            + "option. Path: " + path.toString()));
    }
    if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) {
        throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
            + "CREATE_NEW was specified. Path: " + path.toString()));
    }

    // Translate file-system level tuning knobs into client transfer options.
    AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
    Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
    Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
    ParallelTransferOptions pto =
        new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold);

    // CREATE_NEW maps to an If-None-Match: "*" precondition so an existing blob is never clobbered.
    BlobRequestConditions rq = null;
    if (optionsList.contains(StandardOpenOption.CREATE_NEW)) {
        rq = new BlobRequestConditions().setIfNoneMatch("*");
    }

    return new NioBlobOutputStream(
        resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq));
}
Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
/**
 * Opens a write-only {@link OutputStream} to the blob at {@code path}.
 * <p>
 * Accepted options are CREATE, CREATE_NEW, WRITE and TRUNCATE_EXISTING; WRITE and TRUNCATE_EXISTING are
 * both required, so an existing file is always replaced wholesale. Calling with no options behaves as
 * CREATE + WRITE + TRUNCATE_EXISTING.
 */
public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
    /*
     * An empty option set means the conventional "create or truncate, then write" behavior, so fill in
     * those defaults before validating.
     */
    if (options == null || options.length == 0) {
        options = new OpenOption[] {
            StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING };
    }

    List<OpenOption> requested = Arrays.asList(options);
    List<OpenOption> accepted = Arrays.asList(
        StandardOpenOption.CREATE_NEW,
        StandardOpenOption.CREATE,
        StandardOpenOption.WRITE,
        StandardOpenOption.TRUNCATE_EXISTING);

    // Reject anything outside the small set of options this provider understands.
    for (OpenOption requestedOption : requested) {
        if (!accepted.contains(requestedOption)) {
            throw new UnsupportedOperationException("Unsupported option: " + requestedOption.toString());
        }
    }

    // Updating in place is not possible, so both WRITE and TRUNCATE_EXISTING are mandatory.
    boolean hasWrite = requested.contains(StandardOpenOption.WRITE);
    boolean hasTruncate = requested.contains(StandardOpenOption.TRUNCATE_EXISTING);
    if (!hasWrite || !hasTruncate) {
        throw new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream");
    }

    AzureResource resource = new AzureResource(path);
    DirectoryStatus status = resource.checkDirStatus();

    // A stream may only target a file — never a virtual or concrete directory.
    if (DirectoryStatus.isDirectory(status)) {
        throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: "
            + path.toString()));
    }
    boolean mayCreate = requested.contains(StandardOpenOption.CREATE)
        || requested.contains(StandardOpenOption.CREATE_NEW);
    if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !mayCreate) {
        throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
            + "option. Path: " + path.toString()));
    }
    if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && requested.contains(StandardOpenOption.CREATE_NEW)) {
        throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
            + "CREATE_NEW was specified. Path: " + path.toString()));
    }

    // Carry the file system's tuning values over to the client's transfer options.
    AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
    Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
    Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
    ParallelTransferOptions pto =
        new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold);

    // CREATE_NEW becomes an If-None-Match: "*" precondition so an existing blob cannot be clobbered.
    BlobRequestConditions rq = null;
    if (requested.contains(StandardOpenOption.CREATE_NEW)) {
        rq = new BlobRequestConditions().setIfNoneMatch("*");
    }

    return new NioBlobOutputStream(
        resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq));
}
/**
 * The {@code FileSystemProvider} for the {@code "azb"} scheme, backed by Azure Blob Storage.
 * <p>
 * NOTE(review): this chunk is extraction residue — javadoc and code are collapsed onto single physical lines
 * and several doc fragments (URLs, {@code azb://...} examples, {@code @link} targets) were truncated after a
 * colon. Code tokens below are reproduced unchanged and only reformatted/commented; truncated doc text is
 * restored conservatively — confirm wording against VCS.
 */
class AzureFileSystemProvider extends FileSystemProvider {
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);

    // Names of the standard HTTP content headers recognized when mapping file attributes onto blob
    // headers (see the createDirectory javadoc below).
    public static final String CONTENT_TYPE = "Content-Type";
    public static final String CONTENT_DISPOSITION = "Content-Disposition";
    public static final String CONTENT_LANGUAGE = "Content-Language";
    public static final String CONTENT_ENCODING = "Content-Encoding";
    public static final String CONTENT_MD5 = "Content-MD5";
    public static final String CACHE_CONTROL = "Cache-Control";

    // Query-string key carrying the account name in "azb" URIs.
    private static final String ACCOUNT_QUERY_KEY = "account";
    // Maximum time to wait for a server-side blob copy before failing the operation.
    private static final int COPY_TIMEOUT_SECONDS = 30;

    // Open file systems keyed by account name.
    private final ConcurrentMap<String, FileSystem> openFileSystems;

    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }

    /**
     * Returns {@code "azb".}
     */
    @Override
    public String getScheme() {
        return "azb";
    }

    /**
     * The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
     * (NOTE(review): example restored; original doc text truncated after "azb:".)
     * <p>
     * Once closed, a file system with the same identifier may be reopened.
     * {@inheritDoc}
     */
    @Override
    public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
        String accountName = extractAccountName(uri);
        // Each account may back at most one open file system at a time.
        if (this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
        }
        AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
        this.openFileSystems.put(accountName, afs);
        return afs;
    }

    /**
     * The format of a {@code URI} identifying an file system is {@code "azb://?account=<account_name>"}.
     * (NOTE(review): example restored; original doc text truncated after "azb:".)
     * <p>
     * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
     * file system with the same identifier may be reopened.
     * {@inheritDoc}
     */
    @Override
    public FileSystem getFileSystem(URI uri) {
        String accountName = extractAccountName(uri);
        if (!this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
        }
        return this.openFileSystems.get(accountName);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Path getPath(URI uri) {
        return getFileSystem(uri).getPath(uri.getPath());
    }

    /**
     * Not yet implemented. {@inheritDoc}
     */
    @Override
    public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
        FileAttribute<?>... fileAttributes) throws IOException {
        return null;
    }

    /**
     * Opens an {@link InputStream} to the given path.
     * <p>
     * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always
     * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are
     * supported.
     * <p>
     * Only {@link StandardOpenOption#READ} is supported.
     * (NOTE(review): link target restored; original doc text truncated.)
     *
     * {@inheritDoc}
     */
    @Override
    public InputStream newInputStream(Path path, OpenOption... options) throws IOException {
        if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) {
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Only the read option is supported."));
        }
        AzureResource resource = new AzureResource(path);
        if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) {
            // NOTE(review): the concatenation below produces "...directory.Path must..." with no space;
            // kept verbatim here since this is a runtime message — fix separately if desired.
            throw LoggingUtility.logError(logger,
                new IOException("Path either does not exist or points to a directory."
                    + "Path must point to a file. Path: " + path.toString()));
        }
        return new NioBlobInputStream(resource.getBlobClient().openInputStream());
    }

    /**
     * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob.
     * <p>
     * The only supported options are {@link StandardOpenOption#CREATE}, {@link StandardOpenOption#CREATE_NEW},
     * {@link StandardOpenOption#WRITE} and {@link StandardOpenOption#TRUNCATE_EXISTING}; any other option results
     * in an {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified
     * or an {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten
     * completely.
     * <p>
     * This stream will not attempt to buffer the entire file, however some buffering will be done for potential
     * optimizations and to avoid network thrashing. Specifically, up to the {@link AzureFileSystem} put-blob
     * threshold; if that is exceeded, the data will be broken into chunks and sent in blocks, and writes will be
     * buffered into sizes of the {@link AzureFileSystem} block size. The maximum amount of memory allocated is
     * defined by the {@link AzureFileSystem} maximum concurrency per request, which also configures the level of
     * parallelism with which we may write and thus may affect write speeds as well.
     * (NOTE(review): the {@code @link} targets in this paragraph were truncated in the extracted source and have
     * been paraphrased — confirm the exact constants against VCS.)
     * <p>
     * The data is only committed when the stream is closed. Hence data cannot be read from the destination until
     * the stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is
     * finalized and available for reading.
     * <p>
     * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block
     * size are met at which time they are sent to the service. When the write method returns, there is no
     * guarantee about which phase of this process the data is in other than it has been accepted and will be
     * written. Again, closing will guarantee that the data is written and available.
     * <p>
     * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors.
     * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous
     * write may not otherwise be thrown unless the stream is flushed, closed, or written to again.
     *
     * {@inheritDoc}
     */
    // NOTE(review): the newOutputStream body this javadoc and @Override annotate is missing here; detached
    // copies of it appear earlier in the file. The resulting duplicate @Override below will not compile —
    // restore the method body from VCS.
    @Override
    /**
     * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory.
     *
     * {@inheritDoc}
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     */
    @Override
    public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
        throws IOException {
        if (!(path instanceof AzurePath)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on "
                + "subtypes of Path other than AzurePath"));
        }

        /*
        Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will
        be caught in instatiating the stream below.

        Possible optimization later is to save the result of the list call to use as the first list call inside
        the stream rather than a list call for checking the status and a list call for listing.
         */
        if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) {
            throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString()));
        }
        return new AzureDirectoryStream((AzurePath) path, filter);
    }

    /**
     * Creates a new directory at the specified path.
     * <p>
     * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
     * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
     * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
     * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
     * the presence of an actual storage resource at the given path, which in the case of directories, is a
     * zero-length blob whose name is the directory path with a particular metadata field indicating the blob's
     * status as a directory. This is also known as a <i>concrete directory</i>. Directories created by this file
     * system will strongly exist. Operations targeting directories themselves as the object (e.g. setting
     * properties) will target marker blobs underlying concrete directories. Other operations (e.g. listing) will
     * operate on the blob-name prefix.
     * <p>
     * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
     * directory if it does not exist are a single operation that is atomic with respect to all other filesystem
     * activities that might affect the directory." More specifically, this method will atomically check for
     * <i>strong existence</i> of another file or directory at the given path and fail if one is present. On the
     * other hand, we only check for <i>weak existence</i> of the parent to determine if the given path is valid.
     * Additionally, the action of checking whether the parent exists, is <i>not</i> atomic with the creation of
     * the directory. Note that while it is possible that the parent may be deleted between when the parent is
     * determined to exist and the creation of the child, the creation of the child will always ensure the
     * existence of a virtual parent, so the child will never be left floating and unreachable. The different
     * checks on parent and child is due to limitations in the Storage service API.
     * <p>
     * There may be some unintuitive behavior when working with directories in this file system, particularly
     * virtual directories (usually those not created by this file system). A virtual directory will disappear as
     * soon as all its children have been deleted. Furthermore, if a directory with the given path weakly exists at
     * the time of calling this method, this method will still return success and create a concrete directory at
     * the target location. In other words, it is possible to "double create" a directory if it first weakly exists
     * and then is strongly created. This is both because it is impossible to atomically check if a virtual
     * directory exists while creating a concrete directory and because such behavior will have minimal side
     * effects--no files will be overwritten and the directory will still be available for writing as intended,
     * though it may not be empty.
     * <p>
     * This method will attempt to extract standard HTTP content headers from the list of file attributes to set
     * them as blob headers. All other attributes will be set as blob metadata. The value of every attribute will
     * be converted to a {@code String} with the exception of the Content-MD5 attribute which expects a
     * {@code byte[]}. When extracting the content headers, the following strings will be used for comparison
     * (constants for these values can be found on this type):
     * <ul>
     * <li>{@code Content-Type}</li>
     * <li>{@code Content-Disposition}</li>
     * <li>{@code Content-Language}</li>
     * <li>{@code Content-Encoding}</li>
     * <li>{@code Content-MD5}</li>
     * <li>{@code Cache-Control}</li>
     * </ul>
     * Note that these properties also have a particular semantic in that if one is specified, all are updated. In
     * other words, if any of the above is set, all those that are not set will be cleared. See the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/setting-and-retrieving-properties-and-metadata-for-blob-resources">Azure Docs</a>
     * for more information. (NOTE(review): link truncated after "https:" in the extracted source; reconstructed —
     * confirm against VCS.)
     *
     * {@inheritDoc}
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     */
    @Override
    public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
        // Normalize a null varargs array to empty so the attribute handling below never NPEs.
        fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;

        AzureResource azureResource = new AzureResource(path);
        // Only weak (virtual) existence of the parent is required; atomicity of the child creation itself
        // comes from the If-None-Match: "*" precondition on the put below.
        if (azureResource.checkParentDirectoryExists()) {
            try {
                azureResource.setFileAttributes(Arrays.asList(fileAttributes))
                    .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*"));
            } catch (BlobStorageException e) {
                // 409 + BlobAlreadyExists means something strongly exists at this path already.
                if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                    && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                    throw LoggingUtility.logError(logger,
                        new FileAlreadyExistsException(azureResource.getPath().toString()));
                } else {
                    throw LoggingUtility.logError(logger,
                        new IOException("An error occurred when creating the directory", e));
                }
            }
        } else {
            throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: "
                + azureResource.getPath().toString()));
        }
    }

    /**
     * Deletes the specified resource.
     * <p>
     * This method is not atomic. It is possible to delete a file in use by another process, and doing so will not
     * immediately invalidate any channels open to that file--they will simply start to fail. Root directories
     * cannot be deleted even when empty.
     * {@inheritDoc}
     *
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
*/ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic. More specifically, the checks necessary to validate the * inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered, * the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the nio javadocs, this method has the following requirements for successful completion. * {@link StandardCopyOption * if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the * destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be * thrown. The parent directory of the destination must at least weakly exist; if it does not, an * {@link IOException} will be thrown. The only supported option other than * {@link StandardCopyOption * other option will result in an {@link UnsupportedOperationException}. * * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. 
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @see */ @Override public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzureResource destinationRes = new AzureResource(destination); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. 
*/ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. 
*/ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * {@inheritDoc} */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { } /** * {@inheritDoc} */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { return false; } /** * {@inheritDoc} */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * {@inheritDoc} */ @Override public FileStore getFileStore(Path path) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void checkAccess(Path path, AccessMode... accessModes) throws IOException { } /** * {@inheritDoc} */ @Override public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) { return null; } /** * {@inheritDoc} */ @Override public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void setAttribute(Path path, String s, Object o, LinkOption... 
linkOptions) throws IOException { } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); public static final String CONTENT_TYPE = "Content-Type"; public static final String CONTENT_DISPOSITION = "Content-Disposition"; public static final String CONTENT_LANGUAGE = "Content-Language"; public static final String CONTENT_ENCODING = "Content-Encoding"; public static final String CONTENT_MD5 = "Content-MD5"; public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns {@code "azb".} */ @Override public String getScheme() { return "azb"; } /** * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * {@inheritDoc} */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. 
* {@inheritDoc} */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * {@inheritDoc} */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * {@inheritDoc} */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { return null; } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * {@inheritDoc} */ @Override public InputStream newInputStream(Path path, OpenOption... options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. 
{@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * {@inheritDoc} */ @Override /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic. It is possible to delete a file in use by another process, and doing so will not * immediately invalidate any channels open to that file--they will simply start to fail. Root directories cannot be * deleted even when empty. * {@inheritDoc} * * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. 
*/ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic. More specifically, the checks necessary to validate the * inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered, * the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the nio javadocs, this method has the following requirements for successful completion. * {@link StandardCopyOption * if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the * destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be * thrown. The parent directory of the destination must at least weakly exist; if it does not, an * {@link IOException} will be thrown. The only supported option other than * {@link StandardCopyOption * other option will result in an {@link UnsupportedOperationException}. * * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. 
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @see */ @Override public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzureResource destinationRes = new AzureResource(destination); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. 
*/ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. 
*/ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * {@inheritDoc} */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { } /** * {@inheritDoc} */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { return false; } /** * {@inheritDoc} */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * {@inheritDoc} */ @Override public FileStore getFileStore(Path path) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void checkAccess(Path path, AccessMode... accessModes) throws IOException { } /** * {@inheritDoc} */ @Override public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) { return null; } /** * {@inheritDoc} */ @Override public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void setAttribute(Path path, String s, Object o, LinkOption... 
linkOptions) throws IOException { } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
It's long in 73, which is why I made the config value a long in anticipation of that, but this branch doesn't have those updates yet, so I'll strike these conversions to int once all that gets merged in.
public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException { if (options == null || options.length == 0) { options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING }; } List<OpenOption> optionsList = Arrays.asList(options); List<OpenOption> supportedOptions = Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); for (OpenOption option : optionsList) { if (!supportedOptions.contains(option)) { throw new UnsupportedOperationException("Unsupported option: " + option.toString()); } } if (!optionsList.contains(StandardOpenOption.WRITE) || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) { throw new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream"); } AzureResource resource = new AzureResource(path); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE) || optionsList.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? 
null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsList.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq)); }
Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException { if (options == null || options.length == 0) { options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING }; } List<OpenOption> optionsList = Arrays.asList(options); List<OpenOption> supportedOptions = Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); for (OpenOption option : optionsList) { if (!supportedOptions.contains(option)) { throw new UnsupportedOperationException("Unsupported option: " + option.toString()); } } if (!optionsList.contains(StandardOpenOption.WRITE) || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) { throw new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream"); } AzureResource resource = new AzureResource(path); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE) || optionsList.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? 
null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsList.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq)); }
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); public static final String CONTENT_TYPE = "Content-Type"; public static final String CONTENT_DISPOSITION = "Content-Disposition"; public static final String CONTENT_LANGUAGE = "Content-Language"; public static final String CONTENT_ENCODING = "Content-Encoding"; public static final String CONTENT_MD5 = "Content-MD5"; public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns {@code "azb".} */ @Override public String getScheme() { return "azb"; } /** * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * {@inheritDoc} */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. 
* {@inheritDoc} */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * {@inheritDoc} */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * {@inheritDoc} */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { return null; } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * {@inheritDoc} */ @Override public InputStream newInputStream(Path path, OpenOption... options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. 
{@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * {@inheritDoc} */ @Override /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic. It is possible to delete a file in use by another process, and doing so will not * immediately invalidate any channels open to that file--they will simply start to fail. Root directories cannot be * deleted even when empty. * {@inheritDoc} * * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. 
*/ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic. More specifically, the checks necessary to validate the * inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered, * the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the nio javadocs, this method has the following requirements for successful completion. * {@link StandardCopyOption * if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the * destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be * thrown. The parent directory of the destination must at least weakly exist; if it does not, an * {@link IOException} will be thrown. The only supported option other than * {@link StandardCopyOption * other option will result in an {@link UnsupportedOperationException}. * * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. 
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @see */ @Override public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzureResource destinationRes = new AzureResource(destination); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. 
*/ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. 
*/ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * {@inheritDoc} */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { } /** * {@inheritDoc} */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { return false; } /** * {@inheritDoc} */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * {@inheritDoc} */ @Override public FileStore getFileStore(Path path) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void checkAccess(Path path, AccessMode... accessModes) throws IOException { } /** * {@inheritDoc} */ @Override public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) { return null; } /** * {@inheritDoc} */ @Override public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void setAttribute(Path path, String s, Object o, LinkOption... 
linkOptions) throws IOException { } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); public static final String CONTENT_TYPE = "Content-Type"; public static final String CONTENT_DISPOSITION = "Content-Disposition"; public static final String CONTENT_LANGUAGE = "Content-Language"; public static final String CONTENT_ENCODING = "Content-Encoding"; public static final String CONTENT_MD5 = "Content-MD5"; public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns {@code "azb".} */ @Override public String getScheme() { return "azb"; } /** * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * {@inheritDoc} */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. 
* {@inheritDoc} */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * {@inheritDoc} */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * {@inheritDoc} */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { return null; } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * {@inheritDoc} */ @Override public InputStream newInputStream(Path path, OpenOption... options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. 
{@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * {@inheritDoc} */ @Override /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic. It is possible to delete a file in use by another process, and doing so will not * immediately invalidate any channels open to that file--they will simply start to fail. Root directories cannot be * deleted even when empty. * {@inheritDoc} * * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. 
*/ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic. More specifically, the checks necessary to validate the * inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered, * the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the nio javadocs, this method has the following requirements for successful completion. * {@link StandardCopyOption * if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the * destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be * thrown. The parent directory of the destination must at least weakly exist; if it does not, an * {@link IOException} will be thrown. The only supported option other than * {@link StandardCopyOption * other option will result in an {@link UnsupportedOperationException}. * * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. 
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * {@inheritDoc} * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @see */ @Override public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzureResource destinationRes = new AzureResource(destination); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. 
*/ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. 
*/ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * {@inheritDoc} */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { } /** * {@inheritDoc} */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { return false; } /** * {@inheritDoc} */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * {@inheritDoc} */ @Override public FileStore getFileStore(Path path) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void checkAccess(Path path, AccessMode... accessModes) throws IOException { } /** * {@inheritDoc} */ @Override public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) { return null; } /** * {@inheritDoc} */ @Override public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException { return null; } /** * {@inheritDoc} */ @Override public void setAttribute(Path path, String s, Object o, LinkOption... 
linkOptions) throws IOException { } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
Add javadoc
public RecognizeCustomFormsOptions(final InputStream form, final long length, final String modelId) { super(form, length); this.modelId = Objects.requireNonNull(modelId, "'modelId' cannot be null"); }
}
public RecognizeCustomFormsOptions(final InputStream form, final long length, final String modelId) { super(form, length); this.modelId = modelId; }
class RecognizeCustomFormsOptions extends RecognizeOptions { private final String modelId; public RecognizeCustomFormsOptions(final Flux<ByteBuffer> formData, final long length, final String modelId) { super(formData, length); this.modelId = Objects.requireNonNull(modelId, "'modelId' cannot be null"); } public RecognizeCustomFormsOptions(final String formUrl, final String modelId) { super(formUrl); this.modelId = Objects.requireNonNull(modelId, "'modelId' cannot be null"); } public String getModelId() { return modelId; } @Override public RecognizeCustomFormsOptions setFormContentType(FormContentType formContentType) { super.setFormContentType(formContentType); return this; } @Override public RecognizeCustomFormsOptions setIncludeTextContent(boolean includeTextContent) { super.setIncludeTextContent(includeTextContent); return this; } @Override public RecognizeCustomFormsOptions setPollInterval(Duration pollInterval) { super.setPollInterval(pollInterval); return this; } }
class RecognizeCustomFormsOptions extends RecognizeOptions { private final String modelId; /** * Create a {@code RecognizeCustomFormsOptions option} object * * @param form The {@code InputStream data} of the form to recognize form information from. * @param length the exact length of the provided form data. * @param modelId The UUID string format custom trained model Id to be used. */ /** * Create a {@code RecognizeCustomFormsOptions option} object * * @param formData The {@code ByteBuffer data} of the form to recognize form information from. * @param length The exact length of the provided form data * @param modelId The UUID string format custom trained model Id to be used. */ public RecognizeCustomFormsOptions(final Flux<ByteBuffer> formData, final long length, final String modelId) { super(formData, length); this.modelId = modelId; } /** * Create a {@code RecognizeCustomFormsOptions option} object * * @param formUrl The source URL to the input form. * @param modelId The UUID string format custom trained model Id to be used. */ public RecognizeCustomFormsOptions(final String formUrl, final String modelId) { super(formUrl); this.modelId = modelId; } /** * Get the UUID string format custom trained model Id to be used. * * @return the {@code modelId} value */ public String getModelId() { return modelId; } @Override public RecognizeCustomFormsOptions setFormContentType(FormContentType formContentType) { super.setFormContentType(formContentType); return this; } @Override public RecognizeCustomFormsOptions setIncludeTextContent(boolean includeTextContent) { super.setIncludeTextContent(includeTextContent); return this; } @Override public RecognizeCustomFormsOptions setPollInterval(Duration pollInterval) { super.setPollInterval(pollInterval); return this; } }
Support polling by user.
public void canDeployVirtualNetworkSyncPoll() throws Exception { final String dp = "dpD" + testId; Accepted<Deployment> acceptedDeployment = resourceClient.deployments() .define(dp) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .beginCreate(); Deployment createdDeployment = acceptedDeployment.getAcceptedResult(); Assertions.assertNotEquals("Succeeded", createdDeployment.provisioningState()); PollResponse<Void> pollResponse = acceptedDeployment.getSyncPoller().poll(); while (pollResponse.getStatus() != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) { SdkContext.sleep(1000); pollResponse = acceptedDeployment.getSyncPoller().poll(); } System.out.println("statuc " + pollResponse.getStatus()); Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus()); Deployment deployment = acceptedDeployment.getFinalResult(); Assertions.assertEquals("Succeeded", deployment.provisioningState()); }
}
public void canDeployVirtualNetworkSyncPoll() throws Exception { final String dp = "dpD" + testId; Accepted<Deployment> acceptedDeployment = resourceClient.deployments() .define(dp) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .beginCreate(); Deployment createdDeployment = acceptedDeployment.getAcceptedResult().getValue(); Assertions.assertNotEquals("Succeeded", createdDeployment.provisioningState()); LongRunningOperationStatus pollStatus = acceptedDeployment.getAcceptedResult().getStatus(); int delayInMills = acceptedDeployment.getAcceptedResult().getRetryAfter() == null ? 0 : (int) acceptedDeployment.getAcceptedResult().getRetryAfter().toMillis(); while (pollStatus != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) { SdkContext.sleep(delayInMills); PollResponse<Void> pollResponse = acceptedDeployment.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? 10000 : (int) pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); Deployment deployment = acceptedDeployment.getFinalResult(); Assertions.assertEquals("Succeeded", deployment.provisioningState()); }
class DeploymentsTests extends ResourceManagerTestBase { private ResourceGroups resourceGroups; private ResourceGroup resourceGroup; private String testId; private String rgName; private static String templateUri = "https: private static String blankTemplateUri = "https: private static String parametersUri = "https: private static String updateTemplate = "{\"$schema\":\"https: private static String updateParameters = "{\"vnetAddressPrefix\":{\"value\":\"10.0.0.0/16\"},\"subnet1Name\":{\"value\":\"Subnet1\"},\"subnet1Prefix\":{\"value\":\"10.0.0.0/24\"}}"; private static String contentVersion = "1.0.0.0"; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { super.initializeClients(httpPipeline, profile); testId = sdkContext.randomResourceName("", 9); resourceGroups = resourceClient.resourceGroups(); rgName = "rg" + testId; resourceGroup = resourceGroups.define(rgName) .withRegion(Region.US_SOUTH_CENTRAL) .create(); } @Override protected void cleanUpResources() { resourceGroups.beginDeleteByName(rgName); } @Test public void canDeployVirtualNetwork() throws Exception { final String dpName = "dpA" + testId; resourceClient.deployments() .define(dpName) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .create(); PagedIterable<Deployment> deployments = resourceClient.deployments().listByResourceGroup(rgName); boolean found = false; for (Deployment deployment : deployments) { if (deployment.name().equals(dpName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertTrue(resourceClient.deployments().checkExistence(rgName, dpName)); Deployment deployment = resourceClient.deployments().getByResourceGroup(rgName, dpName); Assertions.assertNotNull(deployment); Assertions.assertEquals("Succeeded", deployment.provisioningState()); GenericResource generic = resourceClient.genericResources().get(rgName, 
"Microsoft.Network", "", "virtualnetworks", "VNet1", "2015-06-15"); Assertions.assertNotNull(generic); Assertions.assertNotNull(deployment.exportTemplate().templateAsJson()); Assertions.assertNotNull(resourceGroup.exportTemplate(ResourceGroupExportTemplateOptions.INCLUDE_BOTH)); PagedIterable<DeploymentOperation> operations = deployment.deploymentOperations().list(); Assertions.assertEquals(4, TestUtilities.getSize(operations)); DeploymentOperation op = deployment.deploymentOperations().getById(operations.iterator().next().operationId()); Assertions.assertNotNull(op); resourceClient.genericResources().delete(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet1", "2015-06-15"); } @Test public void canPostDeploymentWhatIfOnResourceGroup() throws Exception { final String dpName = "dpA" + testId; resourceClient.deployments() .define(dpName) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .create(); PagedIterable<Deployment> deployments = resourceClient.deployments().listByResourceGroup(rgName); boolean found = false; for (Deployment deployment : deployments) { if (deployment.name().equals(dpName)) { found = true; } } Assertions.assertTrue(found); Deployment deployment = resourceClient.deployments().getByResourceGroup(rgName, dpName); Assertions.assertNotNull(deployment); Assertions.assertEquals("Succeeded", deployment.provisioningState()); WhatIfOperationResult result = deployment.prepareWhatIf() .withIncrementalMode() .withWhatIfTemplateLink(templateUri, contentVersion) .whatIf(); Assertions.assertEquals("Succeeded", result.status()); Assertions.assertEquals(3, result.changes().size()); resourceClient.genericResources().delete(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet1", "2015-06-15"); } @Test public void canPostDeploymentWhatIfOnSubscription() throws Exception { final String dpName = "dpA" + testId; resourceClient.deployments() 
.define(dpName) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .create(); PagedIterable<Deployment> deployments = resourceClient.deployments().listByResourceGroup(rgName); boolean found = false; for (Deployment deployment : deployments) { if (deployment.name().equals(dpName)) { found = true; } } Assertions.assertTrue(found); Deployment deployment = resourceClient.deployments().getByResourceGroup(rgName, dpName); Assertions.assertNotNull(deployment); Assertions.assertEquals("Succeeded", deployment.provisioningState()); WhatIfOperationResult result = deployment.prepareWhatIf() .withLocation("westus") .withIncrementalMode() .withWhatIfTemplateLink(blankTemplateUri, contentVersion) .whatIfAtSubscriptionScope(); Assertions.assertEquals("Succeeded", result.status()); Assertions.assertEquals(0, result.changes().size()); resourceClient.genericResources().delete(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet1", "2015-06-15"); } @Test @Disabled("deployment.cancel() doesn't throw but provisining state says Running not Cancelled...") public void canCancelVirtualNetworkDeployment() throws Exception { final String dp = "dpB" + testId; resourceClient.deployments() .define(dp) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .beginCreate(); Deployment deployment = resourceClient.deployments().getByResourceGroup(rgName, dp); Assertions.assertEquals(dp, deployment.name()); deployment.cancel(); deployment = resourceClient.deployments().getByResourceGroup(rgName, dp); Assertions.assertEquals("Canceled", deployment.provisioningState()); Assertions.assertFalse(resourceClient.genericResources().checkExistence(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet1", "2015-06-15")); } @Test public void canUpdateVirtualNetworkDeployment() 
throws Exception { final String dp = "dpC" + testId; Accepted<Deployment> acceptedDeployment = resourceClient.deployments() .define(dp) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .beginCreate(); Deployment createdDeployment = acceptedDeployment.getAcceptedResult(); Deployment deployment = resourceClient.deployments().getByResourceGroup(rgName, dp); Assertions.assertEquals(createdDeployment.correlationId(), deployment.correlationId()); Assertions.assertEquals(dp, deployment.name()); deployment.cancel(); deployment = resourceClient.deployments().getByResourceGroup(rgName, dp); Assertions.assertEquals("Canceled", deployment.provisioningState()); deployment.update() .withTemplate(updateTemplate) .withParameters(updateParameters) .withMode(DeploymentMode.INCREMENTAL) .apply(); deployment = resourceClient.deployments().getByResourceGroup(rgName, dp); Assertions.assertEquals(DeploymentMode.INCREMENTAL, deployment.mode()); Assertions.assertEquals("Succeeded", deployment.provisioningState()); GenericResource genericVnet = resourceClient.genericResources().get(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet2", "2015-06-15"); Assertions.assertNotNull(genericVnet); resourceClient.genericResources().delete(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet2", "2015-06-15"); } @Test }
class DeploymentsTests extends ResourceManagerTestBase { private ResourceGroups resourceGroups; private ResourceGroup resourceGroup; private String testId; private String rgName; private static String templateUri = "https: private static String blankTemplateUri = "https: private static String parametersUri = "https: private static String updateTemplate = "{\"$schema\":\"https: private static String updateParameters = "{\"vnetAddressPrefix\":{\"value\":\"10.0.0.0/16\"},\"subnet1Name\":{\"value\":\"Subnet1\"},\"subnet1Prefix\":{\"value\":\"10.0.0.0/24\"}}"; private static String contentVersion = "1.0.0.0"; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { super.initializeClients(httpPipeline, profile); testId = sdkContext.randomResourceName("", 9); resourceGroups = resourceClient.resourceGroups(); rgName = "rg" + testId; resourceGroup = resourceGroups.define(rgName) .withRegion(Region.US_SOUTH_CENTRAL) .create(); } @Override protected void cleanUpResources() { resourceGroups.beginDeleteByName(rgName); } @Test public void canDeployVirtualNetwork() throws Exception { final String dpName = "dpA" + testId; resourceClient.deployments() .define(dpName) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .create(); PagedIterable<Deployment> deployments = resourceClient.deployments().listByResourceGroup(rgName); boolean found = false; for (Deployment deployment : deployments) { if (deployment.name().equals(dpName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertTrue(resourceClient.deployments().checkExistence(rgName, dpName)); Deployment deployment = resourceClient.deployments().getByResourceGroup(rgName, dpName); Assertions.assertNotNull(deployment); Assertions.assertEquals("Succeeded", deployment.provisioningState()); GenericResource generic = resourceClient.genericResources().get(rgName, 
"Microsoft.Network", "", "virtualnetworks", "VNet1", "2015-06-15"); Assertions.assertNotNull(generic); Assertions.assertNotNull(deployment.exportTemplate().templateAsJson()); Assertions.assertNotNull(resourceGroup.exportTemplate(ResourceGroupExportTemplateOptions.INCLUDE_BOTH)); PagedIterable<DeploymentOperation> operations = deployment.deploymentOperations().list(); Assertions.assertEquals(4, TestUtilities.getSize(operations)); DeploymentOperation op = deployment.deploymentOperations().getById(operations.iterator().next().operationId()); Assertions.assertNotNull(op); resourceClient.genericResources().delete(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet1", "2015-06-15"); } @Test public void canPostDeploymentWhatIfOnResourceGroup() throws Exception { final String dpName = "dpA" + testId; resourceClient.deployments() .define(dpName) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .create(); PagedIterable<Deployment> deployments = resourceClient.deployments().listByResourceGroup(rgName); boolean found = false; for (Deployment deployment : deployments) { if (deployment.name().equals(dpName)) { found = true; } } Assertions.assertTrue(found); Deployment deployment = resourceClient.deployments().getByResourceGroup(rgName, dpName); Assertions.assertNotNull(deployment); Assertions.assertEquals("Succeeded", deployment.provisioningState()); WhatIfOperationResult result = deployment.prepareWhatIf() .withIncrementalMode() .withWhatIfTemplateLink(templateUri, contentVersion) .whatIf(); Assertions.assertEquals("Succeeded", result.status()); Assertions.assertEquals(3, result.changes().size()); resourceClient.genericResources().delete(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet1", "2015-06-15"); } @Test public void canPostDeploymentWhatIfOnSubscription() throws Exception { final String dpName = "dpA" + testId; resourceClient.deployments() 
.define(dpName) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .create(); PagedIterable<Deployment> deployments = resourceClient.deployments().listByResourceGroup(rgName); boolean found = false; for (Deployment deployment : deployments) { if (deployment.name().equals(dpName)) { found = true; } } Assertions.assertTrue(found); Deployment deployment = resourceClient.deployments().getByResourceGroup(rgName, dpName); Assertions.assertNotNull(deployment); Assertions.assertEquals("Succeeded", deployment.provisioningState()); WhatIfOperationResult result = deployment.prepareWhatIf() .withLocation("westus") .withIncrementalMode() .withWhatIfTemplateLink(blankTemplateUri, contentVersion) .whatIfAtSubscriptionScope(); Assertions.assertEquals("Succeeded", result.status()); Assertions.assertEquals(0, result.changes().size()); resourceClient.genericResources().delete(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet1", "2015-06-15"); } @Test @Disabled("deployment.cancel() doesn't throw but provisining state says Running not Cancelled...") public void canCancelVirtualNetworkDeployment() throws Exception { final String dp = "dpB" + testId; resourceClient.deployments() .define(dp) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .beginCreate(); Deployment deployment = resourceClient.deployments().getByResourceGroup(rgName, dp); Assertions.assertEquals(dp, deployment.name()); deployment.cancel(); deployment = resourceClient.deployments().getByResourceGroup(rgName, dp); Assertions.assertEquals("Canceled", deployment.provisioningState()); Assertions.assertFalse(resourceClient.genericResources().checkExistence(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet1", "2015-06-15")); } @Test public void canUpdateVirtualNetworkDeployment() 
throws Exception { final String dp = "dpC" + testId; Accepted<Deployment> acceptedDeployment = resourceClient.deployments() .define(dp) .withExistingResourceGroup(rgName) .withTemplateLink(templateUri, contentVersion) .withParametersLink(parametersUri, contentVersion) .withMode(DeploymentMode.COMPLETE) .beginCreate(); Deployment createdDeployment = acceptedDeployment.getAcceptedResult().getValue(); Deployment deployment = resourceClient.deployments().getByResourceGroup(rgName, dp); Assertions.assertEquals(createdDeployment.correlationId(), deployment.correlationId()); Assertions.assertEquals(dp, deployment.name()); deployment.cancel(); deployment = resourceClient.deployments().getByResourceGroup(rgName, dp); Assertions.assertEquals("Canceled", deployment.provisioningState()); deployment.update() .withTemplate(updateTemplate) .withParameters(updateParameters) .withMode(DeploymentMode.INCREMENTAL) .apply(); deployment = resourceClient.deployments().getByResourceGroup(rgName, dp); Assertions.assertEquals(DeploymentMode.INCREMENTAL, deployment.mode()); Assertions.assertEquals("Succeeded", deployment.provisioningState()); GenericResource genericVnet = resourceClient.genericResources().get(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet2", "2015-06-15"); Assertions.assertNotNull(genericVnet); resourceClient.genericResources().delete(rgName, "Microsoft.Network", "", "virtualnetworks", "VNet2", "2015-06-15"); } @Test }
In Reactor, void return types won't have `onNext()`. So, this message will have to be printed in `onComplete()` consumer. Update other samples too. ```java .subscribe(ignored -> {}, error -> logger.error("...", error), () -> logger.info("Delete Entity Successful.")); ```
private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("RowKey eq crayolaMarkers"); tableAsyncClient.queryEntity(queryOptions).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); Mono<Void> deleteEntityMono = tableAsyncClient.deleteEntity(tableEntity); return deleteEntityMono; }).subscribe(Void -> { logger.info("Delete Entity Successful."); }, error -> { logger.error("There was an error deleting the Entity. Error: " + error); }); }
logger.info("Delete Entity Successful.");
private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); return tableAsyncClient.deleteEntity(tableEntity); }).subscribe( Void -> { }, error -> System.err.println("There was an error deleting the Entity. Error: " + error), () -> System.out.println("Delete Entity Successful.")); }
class TableServiceAsyncClientCodeSnippets { final ClientLogger logger = new ClientLogger("TableServiceAsyncClientCodeSnippets"); /** * create table code snippet */ public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe(Void -> { logger.info("Table creation successful."); }, error -> { logger.error("There was an error creating the table. Error: " + error); }); } /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe(Void -> { logger.info("Table deletion successful"); }, error -> { logger.error("There was an error deleting the table. Error: " + error); }); } /** * query tables code snippet */ public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { logger.info(azureTable.getName()); }, error -> { logger.error("There was an error querying the service. 
Error: " + error); }); } /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { logger.info("Insert Entity Successful. Entity: " + tableEntity); }, error -> { logger.error("There was an error inserting the Entity. Error: " + error); }); } /** * delete entity code snippet */ /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("RowKey eq crayolaMarkers"); tableAsyncClient.queryEntity(queryOptions).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.Merge, tableEntity); return updateEntityMono; }).subscribe(Void -> { logger.info("Update Entity Successful."); }, error -> { logger.error("There was an error updating the Entity. 
Error: " + error); }); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("RowKey eq crayolaMarkers"); tableAsyncClient.queryEntity(queryOptions).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.Replace, tableEntity); return updateEntityMono; }).subscribe(Void -> { logger.info("Update Entity Successful."); }, error -> { logger.error("There was an error updating the Entity. Error: " + error); }); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("Product eq markers"); queryOptions.setSelect("Seller, Price"); tableAsyncClient.queryEntity(queryOptions).subscribe(tableEntity -> { logger.info("Table Entity: " + tableEntity); }, error -> { logger.error("There was an error querying the table. 
Error: " + error); }); } /** * checks to see if an entity exists code snippet */ private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.queryEntitiesWithPartitionAndRowKey("crayolaMarkers", "markers") .subscribe(tableEntity -> { logger.info("Table Entity exists: " + tableEntity); }, error -> { logger.error("There was an error querying the table. Error: " + error); }); } }
class TableServiceAsyncClientCodeSnippets { /** * create table code snippet */ public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error creating the table. Error: " + error), () -> System.out.println("Table creation successful.")); } /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error deleting the table. Error: " + error), () -> System.out.println("Table deletion successful.")); } /** * query tables code snippet */ public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions().setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { System.out.println(azureTable.getName()); }, error -> { System.err.println("There was an error querying the service. Error: " + error); }); } /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { System.out.println("Insert Entity Successful. 
Entity: " + tableEntity); }, error -> { System.err.println("There was an error inserting the Entity. Error: " + error); }); } /** * delete entity code snippet */ /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.MERGE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error upserting the Entity. Error: " + error), () -> System.out.println("Upsert Entity Successful.")); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.REPLACE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error updating the Entity. 
Error: " + error), () -> System.out.println("Update Entity Successful.")); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions() .setFilter("Product eq markers") .setSelect("Seller, Price"); tableAsyncClient.queryEntities(queryOptions).subscribe(tableEntity -> { System.out.println("Table Entity: " + tableEntity); }, error -> { System.err.println("There was an error querying the table. Error: " + error); }); } /** * checks to see if an entity exists code snippet */ private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.get("crayolaMarkers", "markers") .subscribe(tableEntity -> { System.out.println("Table Entity exists: " + tableEntity); }, error -> { System.err.println("There was an error getting the entity. Error: " + error); }); } }
These methods should return `Mono<TableEntity>` - the updated/upserted entity.
public Mono<Void> updateEntity(UpdateMode updateMode, TableEntity tableEntity) { return Mono.empty(); }
}
public Mono<Void> updateEntity(UpdateMode updateMode, TableEntity tableEntity) { return Mono.empty(); }
class TableAsyncClient { private final String tableName; TableAsyncClient(String tableName) { this.tableName = tableName; } /** * Queries and returns entities in the given table using the select and filter strings * * @param queryOptions the odata query object * @return a paged flux of all the entity which fit this criteria */ public PagedFlux<TableEntity> queryEntity(QueryOptions queryOptions) { return null; } /** * Queries and returns entities in the given table with the given rowKey and ParitionKey * * @param rowKey the given row key * @param partitionKey the given partition key * @return a list of the tables that fit the row and partition key */ public PagedFlux<TableEntity> queryEntitiesWithPartitionAndRowKey(String rowKey, String partitionKey) { return null; } /** * insert a TableEntity with the given properties and return that TableEntity. Property map must include * rowKey and partitionKey * * @param tableEntityProperties a map of properties for the TableEntity * @return the created TableEntity */ public Mono<TableEntity> createEntity(Map<String, Object> tableEntityProperties) { return Mono.empty(); } /** * based on Mode it either inserts or merges if exists or inserts or merges if exists * * @param updateMode type of upsert * @param tableEntity entity to upsert * @return void */ public Mono<Void> upsertEntity(UpdateMode updateMode, TableEntity tableEntity) { return Mono.empty(); } /** * based on Mode it either updates or fails if it does exists or replaces or fails if it does exists * * @param updateMode type of update * @param tableEntity entity to update * @return void */ /** * deletes the given entity * * @param tableEntity entity to delete * @return void */ public Mono<Void> deleteEntity(TableEntity tableEntity) { return Mono.empty(); } /** * deletes the given entity * * @param partitionKey the partition key * @param rowKey the row key * @return void */ public Mono<Void> deleteEntity(String partitionKey, String rowKey) { return Mono.empty(); } /** * 
returns the table name associated with the client * * @return table name */ public Mono<String> getTableName() { return Mono.empty(); } }
class TableAsyncClient { private final String tableName; TableAsyncClient(String tableName) { this.tableName = tableName; } /** * Queries and returns entities in the given table using the select and filter strings * * @param queryOptions the odata query object * @return a paged flux of all the entity which fit this criteria */ public PagedFlux<TableEntity> queryEntities(QueryOptions queryOptions) { return null; } /** * returns the entity with the given rowKey and ParitionKey * * @param rowKey the given row key * @param partitionKey the given partition key * @return an entity that fits the criteria */ public Mono<TableEntity> get(String rowKey, String partitionKey) { return null; } /** * insert a TableEntity with the given properties and return that TableEntity. Property map must include * rowKey and partitionKey * * @param tableEntityProperties a map of properties for the TableEntity * @return the created TableEntity */ public Mono<TableEntity> createEntity(Map<String, Object> tableEntityProperties) { return Mono.empty(); } /** * based on Mode it either inserts or merges if exists or inserts or merges if exists * * @param updateMode type of upsert * @param tableEntity entity to upsert * @return void */ public Mono<Void> upsertEntity(UpdateMode updateMode, TableEntity tableEntity) { return Mono.empty(); } /** * based on Mode it either updates or fails if it does exists or replaces or fails if it does exists * * @param updateMode type of update * @param tableEntity entity to update * @return void */ /** * deletes the given entity * * @param tableEntity entity to delete * @return void */ public Mono<Void> deleteEntity(TableEntity tableEntity) { return Mono.empty(); } /** * deletes the given entity * * @param partitionKey the partition key * @param rowKey the row key * @return void */ public Mono<Void> deleteEntity(String partitionKey, String rowKey) { return Mono.empty(); } /** * returns the table name associated with the client * * @return table name */ public String 
getTableName() { return tableName; } }
This returns a paged collection. So, you might have to check each `tableEntity` to find out if the one that you are testing for existence actually is in the returned collection.
private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.queryEntitiesWithPartitionAndRowKey("crayolaMarkers", "markers") .subscribe(tableEntity -> { logger.info("Table Entity exists: " + tableEntity); }, error -> { logger.error("There was an error querying the table. Error: " + error); }); }
logger.info("Table Entity exists: " + tableEntity);
private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.get("crayolaMarkers", "markers") .subscribe(tableEntity -> { System.out.println("Table Entity exists: " + tableEntity); }, error -> { System.err.println("There was an error getting the entity. Error: " + error); }); }
class TableServiceAsyncClientCodeSnippets { final ClientLogger logger = new ClientLogger("TableServiceAsyncClientCodeSnippets"); /** * create table code snippet */ public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe(Void -> { logger.info("Table creation successful."); }, error -> { logger.error("There was an error creating the table. Error: " + error); }); } /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe(Void -> { logger.info("Table deletion successful"); }, error -> { logger.error("There was an error deleting the table. Error: " + error); }); } /** * query tables code snippet */ public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { logger.info(azureTable.getName()); }, error -> { logger.error("There was an error querying the service. 
Error: " + error); }); } /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { logger.info("Insert Entity Successful. Entity: " + tableEntity); }, error -> { logger.error("There was an error inserting the Entity. Error: " + error); }); } /** * delete entity code snippet */ private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("RowKey eq crayolaMarkers"); tableAsyncClient.queryEntity(queryOptions).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); Mono<Void> deleteEntityMono = tableAsyncClient.deleteEntity(tableEntity); return deleteEntityMono; }).subscribe(Void -> { logger.info("Delete Entity Successful."); }, error -> { logger.error("There was an error deleting the Entity. 
Error: " + error); }); } /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("RowKey eq crayolaMarkers"); tableAsyncClient.queryEntity(queryOptions).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.Merge, tableEntity); return updateEntityMono; }).subscribe(Void -> { logger.info("Update Entity Successful."); }, error -> { logger.error("There was an error updating the Entity. Error: " + error); }); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("RowKey eq crayolaMarkers"); tableAsyncClient.queryEntity(queryOptions).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.Replace, tableEntity); return updateEntityMono; }).subscribe(Void -> { logger.info("Update Entity Successful."); }, error -> { logger.error("There was an error updating the Entity. 
Error: " + error); }); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("Product eq markers"); queryOptions.setSelect("Seller, Price"); tableAsyncClient.queryEntity(queryOptions).subscribe(tableEntity -> { logger.info("Table Entity: " + tableEntity); }, error -> { logger.error("There was an error querying the table. Error: " + error); }); } /** * checks to see if an entity exists code snippet */ }
class TableServiceAsyncClientCodeSnippets { /** * create table code snippet */ public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error creating the table. Error: " + error), () -> System.out.println("Table creation successful.")); } /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error deleting the table. Error: " + error), () -> System.out.println("Table deletion successful.")); } /** * query tables code snippet */ public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions().setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { System.out.println(azureTable.getName()); }, error -> { System.err.println("There was an error querying the service. Error: " + error); }); } /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { System.out.println("Insert Entity Successful. 
Entity: " + tableEntity); }, error -> { System.err.println("There was an error inserting the Entity. Error: " + error); }); } /** * delete entity code snippet */ private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); return tableAsyncClient.deleteEntity(tableEntity); }).subscribe( Void -> { }, error -> System.err.println("There was an error deleting the Entity. Error: " + error), () -> System.out.println("Delete Entity Successful.")); } /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.MERGE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error upserting the Entity. 
Error: " + error), () -> System.out.println("Upsert Entity Successful.")); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.REPLACE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error updating the Entity. Error: " + error), () -> System.out.println("Update Entity Successful.")); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions() .setFilter("Product eq markers") .setSelect("Seller, Price"); tableAsyncClient.queryEntities(queryOptions).subscribe(tableEntity -> { System.out.println("Table Entity: " + tableEntity); }, error -> { System.err.println("There was an error querying the table. Error: " + error); }); } /** * checks to see if an entity exists code snippet */ }
I brought this up during our tables meeting and we decided we have to talk with the Tables service team since the Merge API call returns at 204 (no content) so we are still deciding how best to return something if that underlying functionality doesn't return an entity. So we are asking them about hero scenarios and what users would want from this type of function
public Mono<Void> updateEntity(UpdateMode updateMode, TableEntity tableEntity) { return Mono.empty(); }
}
public Mono<Void> updateEntity(UpdateMode updateMode, TableEntity tableEntity) { return Mono.empty(); }
class TableAsyncClient { private final String tableName; TableAsyncClient(String tableName) { this.tableName = tableName; } /** * Queries and returns entities in the given table using the select and filter strings * * @param queryOptions the odata query object * @return a paged flux of all the entity which fit this criteria */ public PagedFlux<TableEntity> queryEntity(QueryOptions queryOptions) { return null; } /** * Queries and returns entities in the given table with the given rowKey and ParitionKey * * @param rowKey the given row key * @param partitionKey the given partition key * @return a list of the tables that fit the row and partition key */ public PagedFlux<TableEntity> queryEntitiesWithPartitionAndRowKey(String rowKey, String partitionKey) { return null; } /** * insert a TableEntity with the given properties and return that TableEntity. Property map must include * rowKey and partitionKey * * @param tableEntityProperties a map of properties for the TableEntity * @return the created TableEntity */ public Mono<TableEntity> createEntity(Map<String, Object> tableEntityProperties) { return Mono.empty(); } /** * based on Mode it either inserts or merges if exists or inserts or merges if exists * * @param updateMode type of upsert * @param tableEntity entity to upsert * @return void */ public Mono<Void> upsertEntity(UpdateMode updateMode, TableEntity tableEntity) { return Mono.empty(); } /** * based on Mode it either updates or fails if it does exists or replaces or fails if it does exists * * @param updateMode type of update * @param tableEntity entity to update * @return void */ /** * deletes the given entity * * @param tableEntity entity to delete * @return void */ public Mono<Void> deleteEntity(TableEntity tableEntity) { return Mono.empty(); } /** * deletes the given entity * * @param partitionKey the partition key * @param rowKey the row key * @return void */ public Mono<Void> deleteEntity(String partitionKey, String rowKey) { return Mono.empty(); } /** * 
returns the table name associated with the client * * @return table name */ public Mono<String> getTableName() { return Mono.empty(); } }
class TableAsyncClient { private final String tableName; TableAsyncClient(String tableName) { this.tableName = tableName; } /** * Queries and returns entities in the given table using the select and filter strings * * @param queryOptions the odata query object * @return a paged flux of all the entity which fit this criteria */ public PagedFlux<TableEntity> queryEntities(QueryOptions queryOptions) { return null; } /** * returns the entity with the given rowKey and ParitionKey * * @param rowKey the given row key * @param partitionKey the given partition key * @return an entity that fits the criteria */ public Mono<TableEntity> get(String rowKey, String partitionKey) { return null; } /** * insert a TableEntity with the given properties and return that TableEntity. Property map must include * rowKey and partitionKey * * @param tableEntityProperties a map of properties for the TableEntity * @return the created TableEntity */ public Mono<TableEntity> createEntity(Map<String, Object> tableEntityProperties) { return Mono.empty(); } /** * based on Mode it either inserts or merges if exists or inserts or merges if exists * * @param updateMode type of upsert * @param tableEntity entity to upsert * @return void */ public Mono<Void> upsertEntity(UpdateMode updateMode, TableEntity tableEntity) { return Mono.empty(); } /** * based on Mode it either updates or fails if it does exists or replaces or fails if it does exists * * @param updateMode type of update * @param tableEntity entity to update * @return void */ /** * deletes the given entity * * @param tableEntity entity to delete * @return void */ public Mono<Void> deleteEntity(TableEntity tableEntity) { return Mono.empty(); } /** * deletes the given entity * * @param partitionKey the partition key * @param rowKey the row key * @return void */ public Mono<Void> deleteEntity(String partitionKey, String rowKey) { return Mono.empty(); } /** * returns the table name associated with the client * * @return table name */ public String 
getTableName() { return tableName; } }
updated using get() method.
private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.queryEntitiesWithPartitionAndRowKey("crayolaMarkers", "markers") .subscribe(tableEntity -> { logger.info("Table Entity exists: " + tableEntity); }, error -> { logger.error("There was an error querying the table. Error: " + error); }); }
logger.info("Table Entity exists: " + tableEntity);
private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.get("crayolaMarkers", "markers") .subscribe(tableEntity -> { System.out.println("Table Entity exists: " + tableEntity); }, error -> { System.err.println("There was an error getting the entity. Error: " + error); }); }
class TableServiceAsyncClientCodeSnippets { final ClientLogger logger = new ClientLogger("TableServiceAsyncClientCodeSnippets"); /** * create table code snippet */ public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe(Void -> { logger.info("Table creation successful."); }, error -> { logger.error("There was an error creating the table. Error: " + error); }); } /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe(Void -> { logger.info("Table deletion successful"); }, error -> { logger.error("There was an error deleting the table. Error: " + error); }); } /** * query tables code snippet */ public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { logger.info(azureTable.getName()); }, error -> { logger.error("There was an error querying the service. 
Error: " + error); }); } /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { logger.info("Insert Entity Successful. Entity: " + tableEntity); }, error -> { logger.error("There was an error inserting the Entity. Error: " + error); }); } /** * delete entity code snippet */ private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("RowKey eq crayolaMarkers"); tableAsyncClient.queryEntity(queryOptions).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); Mono<Void> deleteEntityMono = tableAsyncClient.deleteEntity(tableEntity); return deleteEntityMono; }).subscribe(Void -> { logger.info("Delete Entity Successful."); }, error -> { logger.error("There was an error deleting the Entity. 
Error: " + error); }); } /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("RowKey eq crayolaMarkers"); tableAsyncClient.queryEntity(queryOptions).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.Merge, tableEntity); return updateEntityMono; }).subscribe(Void -> { logger.info("Update Entity Successful."); }, error -> { logger.error("There was an error updating the Entity. Error: " + error); }); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("RowKey eq crayolaMarkers"); tableAsyncClient.queryEntity(queryOptions).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.Replace, tableEntity); return updateEntityMono; }).subscribe(Void -> { logger.info("Update Entity Successful."); }, error -> { logger.error("There was an error updating the Entity. 
Error: " + error); }); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("Product eq markers"); queryOptions.setSelect("Seller, Price"); tableAsyncClient.queryEntity(queryOptions).subscribe(tableEntity -> { logger.info("Table Entity: " + tableEntity); }, error -> { logger.error("There was an error querying the table. Error: " + error); }); } /** * checks to see if an entity exists code snippet */ }
class TableServiceAsyncClientCodeSnippets { /** * create table code snippet */ public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error creating the table. Error: " + error), () -> System.out.println("Table creation successful.")); } /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error deleting the table. Error: " + error), () -> System.out.println("Table deletion successful.")); } /** * query tables code snippet */ public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions().setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { System.out.println(azureTable.getName()); }, error -> { System.err.println("There was an error querying the service. Error: " + error); }); } /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { System.out.println("Insert Entity Successful. 
Entity: " + tableEntity); }, error -> { System.err.println("There was an error inserting the Entity. Error: " + error); }); } /** * delete entity code snippet */ private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); return tableAsyncClient.deleteEntity(tableEntity); }).subscribe( Void -> { }, error -> System.err.println("There was an error deleting the Entity. Error: " + error), () -> System.out.println("Delete Entity Successful.")); } /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.MERGE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error upserting the Entity. 
Error: " + error), () -> System.out.println("Upsert Entity Successful.")); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.REPLACE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error updating the Entity. Error: " + error), () -> System.out.println("Update Entity Successful.")); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions() .setFilter("Product eq markers") .setSelect("Seller, Price"); tableAsyncClient.queryEntities(queryOptions).subscribe(tableEntity -> { System.out.println("Table Entity: " + tableEntity); }, error -> { System.err.println("There was an error querying the table. Error: " + error); }); } /** * checks to see if an entity exists code snippet */ }
System.err.println` over ClientLogger
public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe( Void -> {}, error -> logger.error("There was an error creating the table. Error: " + error), () -> logger.info("Table creation successful.")); }
error -> logger.error("There was an error creating the table. Error: " + error),
public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error creating the table. Error: " + error), () -> System.out.println("Table creation successful.")); }
class TableServiceAsyncClientCodeSnippets { final ClientLogger logger = new ClientLogger("TableServiceAsyncClientCodeSnippets"); /** * create table code snippet */ /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe( Void -> {}, error -> logger.error("There was an error deleting the table. Error: " + error), () -> logger.info("Table deletion successful.")); } /** * query tables code snippet */ public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { logger.info(azureTable.getName()); }, error -> { logger.error("There was an error querying the service. Error: " + error); }); } /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { logger.info("Insert Entity Successful. Entity: " + tableEntity); }, error -> { logger.error("There was an error inserting the Entity. 
Error: " + error); }); } /** * delete entity code snippet */ private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); Mono<Void> deleteEntityMono = tableAsyncClient.deleteEntity(tableEntity); return deleteEntityMono; }).subscribe( Void -> {}, error -> logger.error("There was an error deleting the Entity. Error: " + error), () -> logger.info("Delete Entity Successful.")); } /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.Merge, tableEntity); return updateEntityMono; }).subscribe( Void -> {}, error -> logger.error("There was an error upserting the Entity. 
Error: " + error), () -> logger.info("Upsert Entity Successful.")); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.Replace, tableEntity); return updateEntityMono; }).subscribe( Void -> {}, error -> logger.error("There was an error updating the Entity. Error: " + error), () -> logger.info("Update Entity Successful.")); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("Product eq markers"); queryOptions.setSelect("Seller, Price"); tableAsyncClient.queryEntities(queryOptions).subscribe(tableEntity -> { logger.info("Table Entity: " + tableEntity); }, error -> { logger.error("There was an error querying the table. 
Error: " + error); }); } /** * checks to see if an entity exists code snippet */ private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.get("crayolaMarkers", "markers") .subscribe(tableEntity -> { logger.info("Table Entity exists: " + tableEntity); }, error -> { logger.error("There was an error getting the entity. Error: " + error); }); } }
class TableServiceAsyncClientCodeSnippets { /** * create table code snippet */ /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error deleting the table. Error: " + error), () -> System.out.println("Table deletion successful.")); } /** * query tables code snippet */ public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions().setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { System.out.println(azureTable.getName()); }, error -> { System.err.println("There was an error querying the service. Error: " + error); }); } /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { System.out.println("Insert Entity Successful. Entity: " + tableEntity); }, error -> { System.err.println("There was an error inserting the Entity. 
Error: " + error); }); } /** * delete entity code snippet */ private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); return tableAsyncClient.deleteEntity(tableEntity); }).subscribe( Void -> { }, error -> System.err.println("There was an error deleting the Entity. Error: " + error), () -> System.out.println("Delete Entity Successful.")); } /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.MERGE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error upserting the Entity. 
Error: " + error), () -> System.out.println("Upsert Entity Successful.")); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.REPLACE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error updating the Entity. Error: " + error), () -> System.out.println("Update Entity Successful.")); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions() .setFilter("Product eq markers") .setSelect("Seller, Price"); tableAsyncClient.queryEntities(queryOptions).subscribe(tableEntity -> { System.out.println("Table Entity: " + tableEntity); }, error -> { System.err.println("There was an error querying the table. 
Error: " + error); }); } /** * checks to see if an entity exists code snippet */ private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.get("crayolaMarkers", "markers") .subscribe(tableEntity -> { System.out.println("Table Entity exists: " + tableEntity); }, error -> { System.err.println("There was an error getting the entity. Error: " + error); }); } }
Highlight usage of the fluent model. var queryOptions = new QueryOptions().setFilter("TableName eq OfficeSupplies");
public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { logger.info(azureTable.getName()); }, error -> { logger.error("There was an error querying the service. Error: " + error); }); }
QueryOptions queryOptions = new QueryOptions();
public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions().setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { System.out.println(azureTable.getName()); }, error -> { System.err.println("There was an error querying the service. Error: " + error); }); }
class TableServiceAsyncClientCodeSnippets { final ClientLogger logger = new ClientLogger("TableServiceAsyncClientCodeSnippets"); /** * create table code snippet */ public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe( Void -> {}, error -> logger.error("There was an error creating the table. Error: " + error), () -> logger.info("Table creation successful.")); } /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe( Void -> {}, error -> logger.error("There was an error deleting the table. Error: " + error), () -> logger.info("Table deletion successful.")); } /** * query tables code snippet */ /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { logger.info("Insert Entity Successful. Entity: " + tableEntity); }, error -> { logger.error("There was an error inserting the Entity. 
Error: " + error); }); } /** * delete entity code snippet */ private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); Mono<Void> deleteEntityMono = tableAsyncClient.deleteEntity(tableEntity); return deleteEntityMono; }).subscribe( Void -> {}, error -> logger.error("There was an error deleting the Entity. Error: " + error), () -> logger.info("Delete Entity Successful.")); } /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.Merge, tableEntity); return updateEntityMono; }).subscribe( Void -> {}, error -> logger.error("There was an error upserting the Entity. 
Error: " + error), () -> logger.info("Upsert Entity Successful.")); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.Replace, tableEntity); return updateEntityMono; }).subscribe( Void -> {}, error -> logger.error("There was an error updating the Entity. Error: " + error), () -> logger.info("Update Entity Successful.")); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("Product eq markers"); queryOptions.setSelect("Seller, Price"); tableAsyncClient.queryEntities(queryOptions).subscribe(tableEntity -> { logger.info("Table Entity: " + tableEntity); }, error -> { logger.error("There was an error querying the table. 
Error: " + error); }); } /** * checks to see if an entity exists code snippet */ private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.get("crayolaMarkers", "markers") .subscribe(tableEntity -> { logger.info("Table Entity exists: " + tableEntity); }, error -> { logger.error("There was an error getting the entity. Error: " + error); }); } }
class TableServiceAsyncClientCodeSnippets { /** * create table code snippet */ public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error creating the table. Error: " + error), () -> System.out.println("Table creation successful.")); } /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error deleting the table. Error: " + error), () -> System.out.println("Table deletion successful.")); } /** * query tables code snippet */ /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { System.out.println("Insert Entity Successful. Entity: " + tableEntity); }, error -> { System.err.println("There was an error inserting the Entity. 
Error: " + error); }); } /** * delete entity code snippet */ private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); return tableAsyncClient.deleteEntity(tableEntity); }).subscribe( Void -> { }, error -> System.err.println("There was an error deleting the Entity. Error: " + error), () -> System.out.println("Delete Entity Successful.")); } /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.MERGE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error upserting the Entity. 
Error: " + error), () -> System.out.println("Upsert Entity Successful.")); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.REPLACE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error updating the Entity. Error: " + error), () -> System.out.println("Update Entity Successful.")); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions() .setFilter("Product eq markers") .setSelect("Seller, Price"); tableAsyncClient.queryEntities(queryOptions).subscribe(tableEntity -> { System.out.println("Table Entity: " + tableEntity); }, error -> { System.err.println("There was an error querying the table. 
Error: " + error); }); } /** * checks to see if an entity exists code snippet */ private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.get("crayolaMarkers", "markers") .subscribe(tableEntity -> { System.out.println("Table Entity exists: " + tableEntity); }, error -> { System.err.println("There was an error getting the entity. Error: " + error); }); } }
No need for the intermediate local variable `deleteEntityMono`. Just return tableAsyncClient.delete....
private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); Mono<Void> deleteEntityMono = tableAsyncClient.deleteEntity(tableEntity); return deleteEntityMono; }).subscribe( Void -> {}, error -> logger.error("There was an error deleting the Entity. Error: " + error), () -> logger.info("Delete Entity Successful.")); }
return deleteEntityMono;
private void deleteEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); return tableAsyncClient.deleteEntity(tableEntity); }).subscribe( Void -> { }, error -> System.err.println("There was an error deleting the Entity. Error: " + error), () -> System.out.println("Delete Entity Successful.")); }
class TableServiceAsyncClientCodeSnippets { final ClientLogger logger = new ClientLogger("TableServiceAsyncClientCodeSnippets"); /** * create table code snippet */ public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe( Void -> {}, error -> logger.error("There was an error creating the table. Error: " + error), () -> logger.info("Table creation successful.")); } /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe( Void -> {}, error -> logger.error("There was an error deleting the table. Error: " + error), () -> logger.info("Table deletion successful.")); } /** * query tables code snippet */ public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { logger.info(azureTable.getName()); }, error -> { logger.error("There was an error querying the service. 
Error: " + error); }); } /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { logger.info("Insert Entity Successful. Entity: " + tableEntity); }, error -> { logger.error("There was an error inserting the Entity. Error: " + error); }); } /** * delete entity code snippet */ /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.Merge, tableEntity); return updateEntityMono; }).subscribe( Void -> {}, error -> logger.error("There was an error upserting the Entity. 
Error: " + error), () -> logger.info("Upsert Entity Successful.")); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { logger.info("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.Replace, tableEntity); return updateEntityMono; }).subscribe( Void -> {}, error -> logger.error("There was an error updating the Entity. Error: " + error), () -> logger.info("Update Entity Successful.")); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions(); queryOptions.setFilter("Product eq markers"); queryOptions.setSelect("Seller, Price"); tableAsyncClient.queryEntities(queryOptions).subscribe(tableEntity -> { logger.info("Table Entity: " + tableEntity); }, error -> { logger.error("There was an error querying the table. 
Error: " + error); }); } /** * checks to see if an entity exists code snippet */ private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.get("crayolaMarkers", "markers") .subscribe(tableEntity -> { logger.info("Table Entity exists: " + tableEntity); }, error -> { logger.error("There was an error getting the entity. Error: " + error); }); } }
class TableServiceAsyncClientCodeSnippets { /** * create table code snippet */ public void createTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.createTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error creating the table. Error: " + error), () -> System.out.println("Table creation successful.")); } /** * delete table code snippet */ public void deleteTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); tableServiceAsyncClient.deleteTable("OfficeSupplies").subscribe( Void -> { }, error -> System.err.println("There was an error deleting the table. Error: " + error), () -> System.out.println("Table deletion successful.")); } /** * query tables code snippet */ public void queryTable() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); QueryOptions queryOptions = new QueryOptions().setFilter("TableName eq OfficeSupplies"); tableServiceAsyncClient.queryTables(queryOptions).subscribe(azureTable -> { System.out.println(azureTable.getName()); }, error -> { System.err.println("There was an error querying the service. Error: " + error); }); } /** * insert entity code snippet */ private void insertEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); Map<String, Object> properties = new HashMap<>(); properties.put("RowKey", "crayolaMarkers"); properties.put("PartitionKey", "markers"); tableAsyncClient.createEntity(properties).subscribe(tableEntity -> { System.out.println("Insert Entity Successful. 
Entity: " + tableEntity); }, error -> { System.err.println("There was an error inserting the Entity. Error: " + error); }); } /** * delete entity code snippet */ /** * upsert entity code snippet */ private void upsert() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.upsertEntity(UpdateMode.MERGE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error upserting the Entity. Error: " + error), () -> System.out.println("Upsert Entity Successful.")); } /** * update entity code snippet */ private void update() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); String rowKey = "crayolaMarkers"; String partitionKey = "markers"; tableAsyncClient.get(rowKey, partitionKey).flatMap(tableEntity -> { System.out.println("Table Entity: " + tableEntity); tableEntity.addProperty("Price", "5"); Mono<Void> updateEntityMono = tableAsyncClient.updateEntity(UpdateMode.REPLACE, tableEntity); return updateEntityMono; }).subscribe( Void -> { }, error -> System.err.println("There was an error updating the Entity. 
Error: " + error), () -> System.out.println("Update Entity Successful.")); } /** * query entity code snippet */ private void queryEntities() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); QueryOptions queryOptions = new QueryOptions() .setFilter("Product eq markers") .setSelect("Seller, Price"); tableAsyncClient.queryEntities(queryOptions).subscribe(tableEntity -> { System.out.println("Table Entity: " + tableEntity); }, error -> { System.err.println("There was an error querying the table. Error: " + error); }); } /** * checks to see if an entity exists code snippet */ private void existsEntity() { TableServiceAsyncClient tableServiceAsyncClient = new TableServiceClientBuilder() .connectionString("connectionString") .buildAsyncClient(); TableAsyncClient tableAsyncClient = tableServiceAsyncClient.getTableAsyncClient("OfficeSupplies"); tableAsyncClient.get("crayolaMarkers", "markers") .subscribe(tableEntity -> { System.out.println("Table Entity exists: " + tableEntity); }, error -> { System.err.println("There was an error getting the entity. Error: " + error); }); } }
Should I be more explicit with source in the name?
public BlobDownloadHeaders() { objectReplicationPolicies = null; }
objectReplicationPolicies = null;
public BlobDownloadHeaders() { objectReplicationSourcePolicies = null; }
class BlobDownloadHeaders { /** * Instantiates an empty {@code BlobDownloadHeaders}. */ /** * Instantiates a {@code BlobDownloadHeaders} object based on the generated, internal version of the type. * @param headers The generated headers type from which to extract values. */ public BlobDownloadHeaders(com.azure.storage.blob.implementation.models.BlobDownloadHeaders headers) { /* We have these two types because we needed to update this interface in a way that could not be generated (getObjectReplicationSourcePolicies), so we switched to generating BlobDownloadHeaders into implementation and wrapping it. Because it's headers type, we couldn't change the name of the generated type. */ this.lastModified = headers.getLastModified(); this.metadata = headers.getMetadata(); this.eTag = headers.getETag(); this.contentLength = headers.getContentLength(); this.contentType = headers.getContentType(); this.contentRange = headers.getContentRange(); this.contentEncoding = headers.getContentEncoding(); this.contentLanguage = headers.getContentLanguage(); this.contentMd5 = headers.getContentMd5(); this.contentDisposition = headers.getContentDisposition(); this.cacheControl = headers.getCacheControl(); this.blobSequenceNumber = headers.getBlobSequenceNumber(); this.blobType = headers.getBlobType(); this.leaseStatus = headers.getLeaseStatus(); this.leaseState = headers.getLeaseState(); this.leaseDuration = headers.getLeaseDuration(); this.copyId = headers.getCopyId(); this.copyStatus = headers.getCopyStatus(); this.copySource = headers.getCopySource(); this.copyProgress = headers.getCopyProgress(); this.copyCompletionTime = headers.getCopyCompletionTime(); this.copyStatusDescription = headers.getCopyStatusDescription(); this.isServerEncrypted = headers.isServerEncrypted(); this.clientRequestId = headers.getClientRequestId(); this.requestId = headers.getRequestId(); this.version = headers.getVersion(); this.versionId = headers.getVersionId(); this.acceptRanges = 
headers.getAcceptRanges(); this.dateProperty = headers.getDateProperty(); this.blobCommittedBlockCount = headers.getBlobCommittedBlockCount(); this.encryptionKeySha256 = headers.getEncryptionKeySha256(); this.encryptionScope = headers.getEncryptionScope(); this.blobContentMD5 = headers.getBlobContentMD5(); this.contentCrc64 = headers.getContentCrc64(); this.errorCode = headers.getErrorCode(); this.tagCount = headers.getTagCount(); Map<String, String> objectReplicationStatus = headers.getObjectReplicationRules(); this.objectReplicationPolicies = new ArrayList<>(); objectReplicationStatus = objectReplicationStatus == null ? new HashMap<>() : objectReplicationStatus; this.objectReplicationDestinationPolicyId = objectReplicationStatus.getOrDefault("policy-id", null); if (this.objectReplicationDestinationPolicyId == null) { for (Map.Entry<String, String> entry : objectReplicationStatus.entrySet()) { String[] split = entry.getKey().split("_"); String policyId = split[0]; String ruleId = split[1]; ObjectReplicationRule rule = new ObjectReplicationRule(ruleId, ObjectReplicationStatus.fromString(entry.getValue())); int index = ObjectReplicationPolicy.getIndexOfObjectReplicationPolicy(policyId, this.objectReplicationPolicies); if (index == -1) { ObjectReplicationPolicy policy = new ObjectReplicationPolicy(policyId); policy.putRule(rule); this.objectReplicationPolicies.add(policy); } else { ObjectReplicationPolicy policy = objectReplicationPolicies.get(index); policy.putRule(rule); } } } } /* * Returns the date and time the container was last modified. Any operation * that modifies the blob, including an update of the blob's metadata or * properties, changes the last-modified time of the blob. */ @JsonProperty(value = "Last-Modified") private OffsetDateTime lastModified; /* * The metadata property. */ @HeaderCollection("x-ms-meta-") private Map<String, String> metadata; /* * Optional. 
Only valid when Object Replication is enabled for the storage * container and on the destination blob of the replication. */ @JsonProperty(value = "x-ms-or-policy-id") private String objectReplicationDestinationPolicyId; /* * The objectReplicationRuleStatus property. */ @HeaderCollection("x-ms-or-") private List<ObjectReplicationPolicy> objectReplicationPolicies; /* * The number of bytes present in the response body. */ @JsonProperty(value = "Content-Length") private Long contentLength; /* * The media type of the body of the response. For Download Blob this is * 'application/octet-stream' */ @JsonProperty(value = "Content-Type") private String contentType; /* * Indicates the range of bytes returned in the event that the client * requested a subset of the blob by setting the 'Range' request header. */ @JsonProperty(value = "Content-Range") private String contentRange; /* * The ETag contains a value that you can use to perform operations * conditionally. If the request version is 2011-08-18 or newer, the ETag * value will be in quotes. */ @JsonProperty(value = "ETag") private String eTag; /* * If the blob has an MD5 hash and this operation is to read the full blob, * this response header is returned so that the client can check for * message content integrity. */ @JsonProperty(value = "Content-MD5") private byte[] contentMd5; /* * This header returns the value that was specified for the * Content-Encoding request header */ @JsonProperty(value = "Content-Encoding") private String contentEncoding; /* * This header is returned if it was previously specified for the blob. */ @JsonProperty(value = "Cache-Control") private String cacheControl; /* * This header returns the value that was specified for the * 'x-ms-blob-content-disposition' header. The Content-Disposition response * header field conveys additional information about how to process the * response payload, and also can be used to attach additional metadata. 
* For example, if set to attachment, it indicates that the user-agent * should not display the response, but instead show a Save As dialog with * a filename other than the blob name specified. */ @JsonProperty(value = "Content-Disposition") private String contentDisposition; /* * This header returns the value that was specified for the * Content-Language request header. */ @JsonProperty(value = "Content-Language") private String contentLanguage; /* * The current sequence number for a page blob. This header is not returned * for block blobs or append blobs */ @JsonProperty(value = "x-ms-blob-sequence-number") private Long blobSequenceNumber; /* * The blob's type. Possible values include: 'BlockBlob', 'PageBlob', * 'AppendBlob' */ @JsonProperty(value = "x-ms-blob-type") private BlobType blobType; /* * Conclusion time of the last attempted Copy Blob operation where this * blob was the destination blob. This value can specify the time of a * completed, aborted, or failed copy attempt. This header does not appear * if a copy is pending, if this blob has never been the destination in a * Copy Blob operation, or if this blob has been modified after a concluded * Copy Blob operation using Set Blob Properties, Put Blob, or Put Block * List. */ @JsonProperty(value = "x-ms-copy-completion-time") private OffsetDateTime copyCompletionTime; /* * Only appears when x-ms-copy-status is failed or pending. Describes the * cause of the last fatal or non-fatal copy operation failure. This header * does not appear if this blob has never been the destination in a Copy * Blob operation, or if this blob has been modified after a concluded Copy * Blob operation using Set Blob Properties, Put Blob, or Put Block List */ @JsonProperty(value = "x-ms-copy-status-description") private String copyStatusDescription; /* * String identifier for this copy operation. Use with Get Blob Properties * to check the status of this copy operation, or pass to Abort Copy Blob * to abort a pending copy. 
*/ @JsonProperty(value = "x-ms-copy-id") private String copyId; /* * Contains the number of bytes copied and the total bytes in the source in * the last attempted Copy Blob operation where this blob was the * destination blob. Can show between 0 and Content-Length bytes copied. * This header does not appear if this blob has never been the destination * in a Copy Blob operation, or if this blob has been modified after a * concluded Copy Blob operation using Set Blob Properties, Put Blob, or * Put Block List */ @JsonProperty(value = "x-ms-copy-progress") private String copyProgress; /* * URL up to 2 KB in length that specifies the source blob or file used in * the last attempted Copy Blob operation where this blob was the * destination blob. This header does not appear if this blob has never * been the destination in a Copy Blob operation, or if this blob has been * modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. */ @JsonProperty(value = "x-ms-copy-source") private String copySource; /* * State of the copy operation identified by x-ms-copy-id. Possible values * include: 'pending', 'success', 'aborted', 'failed' */ @JsonProperty(value = "x-ms-copy-status") private CopyStatusType copyStatus; /* * When a blob is leased, specifies whether the lease is of infinite or * fixed duration. Possible values include: 'infinite', 'fixed' */ @JsonProperty(value = "x-ms-lease-duration") private LeaseDurationType leaseDuration; /* * Lease state of the blob. Possible values include: 'available', 'leased', * 'expired', 'breaking', 'broken' */ @JsonProperty(value = "x-ms-lease-state") private LeaseStateType leaseState; /* * The current lease status of the blob. Possible values include: 'locked', * 'unlocked' */ @JsonProperty(value = "x-ms-lease-status") private LeaseStatusType leaseStatus; /* * If a client request id header is sent in the request, this header will * be present in the response with the same value. 
*/ @JsonProperty(value = "x-ms-client-request-id") private String clientRequestId; /* * This header uniquely identifies the request that was made and can be * used for troubleshooting the request. */ @JsonProperty(value = "x-ms-request-id") private String requestId; /* * Indicates the version of the Blob service used to execute the request. * This header is returned for requests made against version 2009-09-19 and * above. */ @JsonProperty(value = "x-ms-version") private String version; /* * A DateTime value returned by the service that uniquely identifies the * blob. The value of this header indicates the blob version, and may be * used in subsequent requests to access this version of the blob. */ @JsonProperty(value = "x-ms-version-id") private String versionId; /* * Indicates that the service supports requests for partial blob content. */ @JsonProperty(value = "Accept-Ranges") private String acceptRanges; /* * UTC date/time value generated by the service that indicates the time at * which the response was initiated */ @JsonProperty(value = "Date") private OffsetDateTime dateProperty; /* * The number of committed blocks present in the blob. This header is * returned only for append blobs. */ @JsonProperty(value = "x-ms-blob-committed-block-count") private Integer blobCommittedBlockCount; /* * The value of this header is set to true if the blob data and application * metadata are completely encrypted using the specified algorithm. * Otherwise, the value is set to false (when the blob is unencrypted, or * if only parts of the blob/application metadata are encrypted). */ @JsonProperty(value = "x-ms-server-encrypted") private Boolean isServerEncrypted; /* * The SHA-256 hash of the encryption key used to encrypt the blob. This * header is only returned when the blob was encrypted with a * customer-provided key. 
*/ @JsonProperty(value = "x-ms-encryption-key-sha256") private String encryptionKeySha256; /* * Returns the name of the encryption scope used to encrypt the blob * contents and application metadata. Note that the absence of this header * implies use of the default account encryption scope. */ @JsonProperty(value = "x-ms-encryption-scope") private String encryptionScope; /* * If the blob has a MD5 hash, and if request contains range header (Range * or x-ms-range), this response header is returned with the value of the * whole blob's MD5 value. This value may or may not be equal to the value * returned in Content-MD5 header, with the latter calculated from the * requested range */ @JsonProperty(value = "x-ms-blob-content-md5") private byte[] blobContentMD5; /* * The number of tags associated with the blob */ @JsonProperty(value = "x-ms-tag-count") private Long tagCount; /* * If the request is to read a specified range and the * x-ms-range-get-content-crc64 is set to true, then the request returns a * crc64 for the range, as long as the range size is less than or equal to * 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 * is specified in the same request, it will fail with 400(Bad Request) */ @JsonProperty(value = "x-ms-content-crc64") private byte[] contentCrc64; /* * The errorCode property. */ @JsonProperty(value = "x-ms-error-code") private String errorCode; /** * Get the lastModified property: Returns the date and time the container * was last modified. Any operation that modifies the blob, including an * update of the blob's metadata or properties, changes the last-modified * time of the blob. * * @return the lastModified value. */ public OffsetDateTime getLastModified() { return this.lastModified; } /** * Set the lastModified property: Returns the date and time the container * was last modified. Any operation that modifies the blob, including an * update of the blob's metadata or properties, changes the last-modified * time of the blob. 
* * @param lastModified the lastModified value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLastModified(OffsetDateTime lastModified) { this.lastModified = lastModified; return this; } /** * Get the metadata property: The metadata property. * * @return the metadata value. */ public Map<String, String> getMetadata() { return this.metadata; } /** * Set the metadata property: The metadata property. * * @param metadata the metadata value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setMetadata(Map<String, String> metadata) { this.metadata = metadata; return this; } /** * Get the objectReplicationDestinationPolicyId property: Optional. Only * valid when Object Replication is enabled for the storage container and * on the destination blob of the replication. * * @return the objectReplicationDestinationPolicyId value. */ public String getObjectReplicationDestinationPolicyId() { return this.objectReplicationDestinationPolicyId; } /** * Set the objectReplicationDestinationPolicyId property: Optional. Only * valid when Object Replication is enabled for the storage container and * on the destination blob of the replication. * * @param objectReplicationDestinationPolicyId the * objectReplicationDestinationPolicyId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationDestinationPolicyId(String objectReplicationDestinationPolicyId) { this.objectReplicationDestinationPolicyId = objectReplicationDestinationPolicyId; return this; } /** * Get the objectReplicationPolicies property: The * objectReplicationPolicies property. * * @return the objectReplicationPolicies value. */ public List<ObjectReplicationPolicy> getObjectReplicationPolicies() { return this.objectReplicationPolicies; } /** * Set the objectReplicationPolicies property: The * objectReplicationPolicies property. 
* * @param objectReplicationPolicies the objectReplicationPolicies value * to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationPolicies(List<ObjectReplicationPolicy> objectReplicationPolicies) { this.objectReplicationPolicies = objectReplicationPolicies; return this; } /** * Get the contentLength property: The number of bytes present in the * response body. * * @return the contentLength value. */ public Long getContentLength() { return this.contentLength; } /** * Set the contentLength property: The number of bytes present in the * response body. * * @param contentLength the contentLength value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLength(Long contentLength) { this.contentLength = contentLength; return this; } /** * Get the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @return the contentType value. */ public String getContentType() { return this.contentType; } /** * Set the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @param contentType the contentType value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentType(String contentType) { this.contentType = contentType; return this; } /** * Get the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @return the contentRange value. */ public String getContentRange() { return this.contentRange; } /** * Set the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @param contentRange the contentRange value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setContentRange(String contentRange) { this.contentRange = contentRange; return this; } /** * Get the eTag property: The ETag contains a value that you can use to * perform operations conditionally. If the request version is 2011-08-18 * or newer, the ETag value will be in quotes. * * @return the eTag value. */ public String getETag() { return this.eTag; } /** * Set the eTag property: The ETag contains a value that you can use to * perform operations conditionally. If the request version is 2011-08-18 * or newer, the ETag value will be in quotes. * * @param eTag the eTag value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setETag(String eTag) { this.eTag = eTag; return this; } /** * Get the contentMd5 property: If the blob has an MD5 hash and this * operation is to read the full blob, this response header is returned so * that the client can check for message content integrity. * * @return the contentMd5 value. */ public byte[] getContentMd5() { return CoreUtils.clone(this.contentMd5); } /** * Set the contentMd5 property: If the blob has an MD5 hash and this * operation is to read the full blob, this response header is returned so * that the client can check for message content integrity. * * @param contentMd5 the contentMd5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentMd5(byte[] contentMd5) { this.contentMd5 = CoreUtils.clone(contentMd5); return this; } /** * Get the contentEncoding property: This header returns the value that was * specified for the Content-Encoding request header. * * @return the contentEncoding value. */ public String getContentEncoding() { return this.contentEncoding; } /** * Set the contentEncoding property: This header returns the value that was * specified for the Content-Encoding request header. * * @param contentEncoding the contentEncoding value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setContentEncoding(String contentEncoding) { this.contentEncoding = contentEncoding; return this; } /** * Get the cacheControl property: This header is returned if it was * previously specified for the blob. * * @return the cacheControl value. */ public String getCacheControl() { return this.cacheControl; } /** * Set the cacheControl property: This header is returned if it was * previously specified for the blob. * * @param cacheControl the cacheControl value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCacheControl(String cacheControl) { this.cacheControl = cacheControl; return this; } /** * Get the contentDisposition property: This header returns the value that * was specified for the 'x-ms-blob-content-disposition' header. The * Content-Disposition response header field conveys additional information * about how to process the response payload, and also can be used to * attach additional metadata. For example, if set to attachment, it * indicates that the user-agent should not display the response, but * instead show a Save As dialog with a filename other than the blob name * specified. * * @return the contentDisposition value. */ public String getContentDisposition() { return this.contentDisposition; } /** * Set the contentDisposition property: This header returns the value that * was specified for the 'x-ms-blob-content-disposition' header. The * Content-Disposition response header field conveys additional information * about how to process the response payload, and also can be used to * attach additional metadata. For example, if set to attachment, it * indicates that the user-agent should not display the response, but * instead show a Save As dialog with a filename other than the blob name * specified. * * @param contentDisposition the contentDisposition value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setContentDisposition(String contentDisposition) { this.contentDisposition = contentDisposition; return this; } /** * Get the contentLanguage property: This header returns the value that was * specified for the Content-Language request header. * * @return the contentLanguage value. */ public String getContentLanguage() { return this.contentLanguage; } /** * Set the contentLanguage property: This header returns the value that was * specified for the Content-Language request header. * * @param contentLanguage the contentLanguage value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLanguage(String contentLanguage) { this.contentLanguage = contentLanguage; return this; } /** * Get the blobSequenceNumber property: The current sequence number for a * page blob. This header is not returned for block blobs or append blobs. * * @return the blobSequenceNumber value. */ public Long getBlobSequenceNumber() { return this.blobSequenceNumber; } /** * Set the blobSequenceNumber property: The current sequence number for a * page blob. This header is not returned for block blobs or append blobs. * * @param blobSequenceNumber the blobSequenceNumber value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobSequenceNumber(Long blobSequenceNumber) { this.blobSequenceNumber = blobSequenceNumber; return this; } /** * Get the blobType property: The blob's type. Possible values include: * 'BlockBlob', 'PageBlob', 'AppendBlob'. * * @return the blobType value. */ public BlobType getBlobType() { return this.blobType; } /** * Set the blobType property: The blob's type. Possible values include: * 'BlockBlob', 'PageBlob', 'AppendBlob'. * * @param blobType the blobType value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setBlobType(BlobType blobType) { this.blobType = blobType; return this; } /** * Get the copyCompletionTime property: Conclusion time of the last * attempted Copy Blob operation where this blob was the destination blob. * This value can specify the time of a completed, aborted, or failed copy * attempt. This header does not appear if a copy is pending, if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @return the copyCompletionTime value. */ public OffsetDateTime getCopyCompletionTime() { return this.copyCompletionTime; } /** * Set the copyCompletionTime property: Conclusion time of the last * attempted Copy Blob operation where this blob was the destination blob. * This value can specify the time of a completed, aborted, or failed copy * attempt. This header does not appear if a copy is pending, if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @param copyCompletionTime the copyCompletionTime value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyCompletionTime(OffsetDateTime copyCompletionTime) { this.copyCompletionTime = copyCompletionTime; return this; } /** * Get the copyStatusDescription property: Only appears when * x-ms-copy-status is failed or pending. Describes the cause of the last * fatal or non-fatal copy operation failure. This header does not appear * if this blob has never been the destination in a Copy Blob operation, or * if this blob has been modified after a concluded Copy Blob operation * using Set Blob Properties, Put Blob, or Put Block List. * * @return the copyStatusDescription value. 
*/ public String getCopyStatusDescription() { return this.copyStatusDescription; } /** * Set the copyStatusDescription property: Only appears when * x-ms-copy-status is failed or pending. Describes the cause of the last * fatal or non-fatal copy operation failure. This header does not appear * if this blob has never been the destination in a Copy Blob operation, or * if this blob has been modified after a concluded Copy Blob operation * using Set Blob Properties, Put Blob, or Put Block List. * * @param copyStatusDescription the copyStatusDescription value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyStatusDescription(String copyStatusDescription) { this.copyStatusDescription = copyStatusDescription; return this; } /** * Get the copyId property: String identifier for this copy operation. Use * with Get Blob Properties to check the status of this copy operation, or * pass to Abort Copy Blob to abort a pending copy. * * @return the copyId value. */ public String getCopyId() { return this.copyId; } /** * Set the copyId property: String identifier for this copy operation. Use * with Get Blob Properties to check the status of this copy operation, or * pass to Abort Copy Blob to abort a pending copy. * * @param copyId the copyId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyId(String copyId) { this.copyId = copyId; return this; } /** * Get the copyProgress property: Contains the number of bytes copied and * the total bytes in the source in the last attempted Copy Blob operation * where this blob was the destination blob. Can show between 0 and * Content-Length bytes copied. This header does not appear if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @return the copyProgress value. 
*/ public String getCopyProgress() { return this.copyProgress; } /** * Set the copyProgress property: Contains the number of bytes copied and * the total bytes in the source in the last attempted Copy Blob operation * where this blob was the destination blob. Can show between 0 and * Content-Length bytes copied. This header does not appear if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @param copyProgress the copyProgress value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyProgress(String copyProgress) { this.copyProgress = copyProgress; return this; } /** * Get the copySource property: URL up to 2 KB in length that specifies the * source blob or file used in the last attempted Copy Blob operation where * this blob was the destination blob. This header does not appear if this * blob has never been the destination in a Copy Blob operation, or if this * blob has been modified after a concluded Copy Blob operation using Set * Blob Properties, Put Blob, or Put Block List. * * @return the copySource value. */ public String getCopySource() { return this.copySource; } /** * Set the copySource property: URL up to 2 KB in length that specifies the * source blob or file used in the last attempted Copy Blob operation where * this blob was the destination blob. This header does not appear if this * blob has never been the destination in a Copy Blob operation, or if this * blob has been modified after a concluded Copy Blob operation using Set * Blob Properties, Put Blob, or Put Block List. * * @param copySource the copySource value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setCopySource(String copySource) { this.copySource = copySource; return this; } /** * Get the copyStatus property: State of the copy operation identified by * x-ms-copy-id. Possible values include: 'pending', 'success', 'aborted', * 'failed'. * * @return the copyStatus value. */ public CopyStatusType getCopyStatus() { return this.copyStatus; } /** * Set the copyStatus property: State of the copy operation identified by * x-ms-copy-id. Possible values include: 'pending', 'success', 'aborted', * 'failed'. * * @param copyStatus the copyStatus value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyStatus(CopyStatusType copyStatus) { this.copyStatus = copyStatus; return this; } /** * Get the leaseDuration property: When a blob is leased, specifies whether * the lease is of infinite or fixed duration. Possible values include: * 'infinite', 'fixed'. * * @return the leaseDuration value. */ public LeaseDurationType getLeaseDuration() { return this.leaseDuration; } /** * Set the leaseDuration property: When a blob is leased, specifies whether * the lease is of infinite or fixed duration. Possible values include: * 'infinite', 'fixed'. * * @param leaseDuration the leaseDuration value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseDuration(LeaseDurationType leaseDuration) { this.leaseDuration = leaseDuration; return this; } /** * Get the leaseState property: Lease state of the blob. Possible values * include: 'available', 'leased', 'expired', 'breaking', 'broken'. * * @return the leaseState value. */ public LeaseStateType getLeaseState() { return this.leaseState; } /** * Set the leaseState property: Lease state of the blob. Possible values * include: 'available', 'leased', 'expired', 'breaking', 'broken'. * * @param leaseState the leaseState value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setLeaseState(LeaseStateType leaseState) { this.leaseState = leaseState; return this; } /** * Get the leaseStatus property: The current lease status of the blob. * Possible values include: 'locked', 'unlocked'. * * @return the leaseStatus value. */ public LeaseStatusType getLeaseStatus() { return this.leaseStatus; } /** * Set the leaseStatus property: The current lease status of the blob. * Possible values include: 'locked', 'unlocked'. * * @param leaseStatus the leaseStatus value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseStatus(LeaseStatusType leaseStatus) { this.leaseStatus = leaseStatus; return this; } /** * Get the clientRequestId property: If a client request id header is sent * in the request, this header will be present in the response with the * same value. * * @return the clientRequestId value. */ public String getClientRequestId() { return this.clientRequestId; } /** * Set the clientRequestId property: If a client request id header is sent * in the request, this header will be present in the response with the * same value. * * @param clientRequestId the clientRequestId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setClientRequestId(String clientRequestId) { this.clientRequestId = clientRequestId; return this; } /** * Get the requestId property: This header uniquely identifies the request * that was made and can be used for troubleshooting the request. * * @return the requestId value. */ public String getRequestId() { return this.requestId; } /** * Set the requestId property: This header uniquely identifies the request * that was made and can be used for troubleshooting the request. * * @param requestId the requestId value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setRequestId(String requestId) { this.requestId = requestId; return this; } /** * Get the version property: Indicates the version of the Blob service used * to execute the request. This header is returned for requests made * against version 2009-09-19 and above. * * @return the version value. */ public String getVersion() { return this.version; } /** * Set the version property: Indicates the version of the Blob service used * to execute the request. This header is returned for requests made * against version 2009-09-19 and above. * * @param version the version value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setVersion(String version) { this.version = version; return this; } /** * Get the versionId property: A DateTime value returned by the service * that uniquely identifies the blob. The value of this header indicates * the blob version, and may be used in subsequent requests to access this * version of the blob. * * @return the versionId value. */ public String getVersionId() { return this.versionId; } /** * Set the versionId property: A DateTime value returned by the service * that uniquely identifies the blob. The value of this header indicates * the blob version, and may be used in subsequent requests to access this * version of the blob. * * @param versionId the versionId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setVersionId(String versionId) { this.versionId = versionId; return this; } /** * Get the acceptRanges property: Indicates that the service supports * requests for partial blob content. * * @return the acceptRanges value. */ public String getAcceptRanges() { return this.acceptRanges; } /** * Set the acceptRanges property: Indicates that the service supports * requests for partial blob content. * * @param acceptRanges the acceptRanges value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setAcceptRanges(String acceptRanges) { this.acceptRanges = acceptRanges; return this; } /** * Get the dateProperty property: UTC date/time value generated by the * service that indicates the time at which the response was initiated. * * @return the dateProperty value. */ public OffsetDateTime getDateProperty() { return this.dateProperty; } /** * Set the dateProperty property: UTC date/time value generated by the * service that indicates the time at which the response was initiated. * * @param dateProperty the dateProperty value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setDateProperty(OffsetDateTime dateProperty) { this.dateProperty = dateProperty; return this; } /** * Get the blobCommittedBlockCount property: The number of committed blocks * present in the blob. This header is returned only for append blobs. * * @return the blobCommittedBlockCount value. */ public Integer getBlobCommittedBlockCount() { return this.blobCommittedBlockCount; } /** * Set the blobCommittedBlockCount property: The number of committed blocks * present in the blob. This header is returned only for append blobs. * * @param blobCommittedBlockCount the blobCommittedBlockCount value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobCommittedBlockCount(Integer blobCommittedBlockCount) { this.blobCommittedBlockCount = blobCommittedBlockCount; return this; } /** * Get the isServerEncrypted property: The value of this header is set to * true if the blob data and application metadata are completely encrypted * using the specified algorithm. Otherwise, the value is set to false * (when the blob is unencrypted, or if only parts of the blob/application * metadata are encrypted). * * @return the isServerEncrypted value. 
*/ public Boolean isServerEncrypted() { return this.isServerEncrypted; } /** * Set the isServerEncrypted property: The value of this header is set to * true if the blob data and application metadata are completely encrypted * using the specified algorithm. Otherwise, the value is set to false * (when the blob is unencrypted, or if only parts of the blob/application * metadata are encrypted). * * @param isServerEncrypted the isServerEncrypted value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setIsServerEncrypted(Boolean isServerEncrypted) { this.isServerEncrypted = isServerEncrypted; return this; } /** * Get the encryptionKeySha256 property: The SHA-256 hash of the encryption * key used to encrypt the blob. This header is only returned when the blob * was encrypted with a customer-provided key. * * @return the encryptionKeySha256 value. */ public String getEncryptionKeySha256() { return this.encryptionKeySha256; } /** * Set the encryptionKeySha256 property: The SHA-256 hash of the encryption * key used to encrypt the blob. This header is only returned when the blob * was encrypted with a customer-provided key. * * @param encryptionKeySha256 the encryptionKeySha256 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionKeySha256(String encryptionKeySha256) { this.encryptionKeySha256 = encryptionKeySha256; return this; } /** * Get the encryptionScope property: Returns the name of the encryption * scope used to encrypt the blob contents and application metadata. Note * that the absence of this header implies use of the default account * encryption scope. * * @return the encryptionScope value. */ public String getEncryptionScope() { return this.encryptionScope; } /** * Set the encryptionScope property: Returns the name of the encryption * scope used to encrypt the blob contents and application metadata. 
Note * that the absence of this header implies use of the default account * encryption scope. * * @param encryptionScope the encryptionScope value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionScope(String encryptionScope) { this.encryptionScope = encryptionScope; return this; } /** * Get the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @return the blobContentMD5 value. */ public byte[] getBlobContentMD5() { return CoreUtils.clone(this.blobContentMD5); } /** * Set the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @param blobContentMD5 the blobContentMD5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobContentMD5(byte[] blobContentMD5) { this.blobContentMD5 = CoreUtils.clone(blobContentMD5); return this; } /** * Get the tagCount property: The number of tags associated with the blob. * * @return the tagCount value. */ public Long getTagCount() { return this.tagCount; } /** * Set the tagCount property: The number of tags associated with the blob. * * @param tagCount the tagCount value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setTagCount(Long tagCount) { this.tagCount = tagCount; return this; } /** * Get the contentCrc64 property: If the request is to read a specified * range and the x-ms-range-get-content-crc64 is set to true, then the * request returns a crc64 for the range, as long as the range size is less * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp; * x-ms-range-get-content-md5 is specified in the same request, it will * fail with 400(Bad Request). * * @return the contentCrc64 value. */ public byte[] getContentCrc64() { return CoreUtils.clone(this.contentCrc64); } /** * Set the contentCrc64 property: If the request is to read a specified * range and the x-ms-range-get-content-crc64 is set to true, then the * request returns a crc64 for the range, as long as the range size is less * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp; * x-ms-range-get-content-md5 is specified in the same request, it will * fail with 400(Bad Request). * * @param contentCrc64 the contentCrc64 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentCrc64(byte[] contentCrc64) { this.contentCrc64 = CoreUtils.clone(contentCrc64); return this; } /** * Get the errorCode property: The errorCode property. * * @return the errorCode value. */ public String getErrorCode() { return this.errorCode; } /** * Set the errorCode property: The errorCode property. * * @param errorCode the errorCode value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setErrorCode(String errorCode) { this.errorCode = errorCode; return this; } }
class BlobDownloadHeaders {
    /**
     * Instantiates an empty {@code BlobDownloadHeaders}.
     */
    public BlobDownloadHeaders() {
        // FIX: this no-arg constructor was documented but missing; declaring the parameterized
        // constructor below suppressed the implicit default constructor, so the documented empty
        // constructor is restored explicitly here.
    }

    /**
     * Instantiates a {@code BlobDownloadHeaders} object based on the generated, internal version of the type.
     *
     * @param headers The generated headers type from which to extract values.
     */
    public BlobDownloadHeaders(com.azure.storage.blob.implementation.models.BlobDownloadHeaders headers) {
        /* We have these two types because we needed to update this interface in a way that could not be generated
        (getObjectReplicationSourcePolicies), so we switched to generating BlobDownloadHeaders into implementation and
        wrapping it. Because it's headers type, we couldn't change the name of the generated type. */
        this.lastModified = headers.getLastModified();
        this.metadata = headers.getMetadata();
        this.eTag = headers.getETag();
        this.contentLength = headers.getContentLength();
        this.contentType = headers.getContentType();
        this.contentRange = headers.getContentRange();
        this.contentEncoding = headers.getContentEncoding();
        this.contentLanguage = headers.getContentLanguage();
        this.contentMd5 = headers.getContentMd5();
        this.contentDisposition = headers.getContentDisposition();
        this.cacheControl = headers.getCacheControl();
        this.blobSequenceNumber = headers.getBlobSequenceNumber();
        this.blobType = headers.getBlobType();
        this.leaseStatus = headers.getLeaseStatus();
        this.leaseState = headers.getLeaseState();
        this.leaseDuration = headers.getLeaseDuration();
        this.copyId = headers.getCopyId();
        this.copyStatus = headers.getCopyStatus();
        this.copySource = headers.getCopySource();
        this.copyProgress = headers.getCopyProgress();
        this.copyCompletionTime = headers.getCopyCompletionTime();
        this.copyStatusDescription = headers.getCopyStatusDescription();
        this.isServerEncrypted = headers.isServerEncrypted();
        this.clientRequestId = headers.getClientRequestId();
        this.requestId = headers.getRequestId();
        this.version = headers.getVersion();
        this.versionId = headers.getVersionId();
        this.acceptRanges = headers.getAcceptRanges();
        this.dateProperty = headers.getDateProperty();
        this.blobCommittedBlockCount = headers.getBlobCommittedBlockCount();
        this.encryptionKeySha256 = headers.getEncryptionKeySha256();
        this.encryptionScope = headers.getEncryptionScope();
        this.blobContentMD5 = headers.getBlobContentMD5();
        this.contentCrc64 = headers.getContentCrc64();
        this.errorCode = headers.getErrorCode();
        this.tagCount = headers.getTagCount();

        // Translate the raw "x-ms-or-" header collection. The map either contains a single
        // "policy-id" entry (this blob is a replication destination) or entries whose keys name
        // a source policy/rule pair.
        Map<String, String> objectReplicationStatus = headers.getObjectReplicationRules();
        Map<String, List<ObjectReplicationRule>> internalSourcePolicies = new HashMap<>();
        objectReplicationStatus = objectReplicationStatus == null ? new HashMap<>() : objectReplicationStatus;
        this.objectReplicationDestinationPolicyId = objectReplicationStatus.getOrDefault("policy-id", null);
        if (this.objectReplicationDestinationPolicyId == null) {
            // No destination policy id, so every entry describes a source rule status.
            for (Map.Entry<String, String> entry : objectReplicationStatus.entrySet()) {
                // NOTE(review): assumes every remaining key has the form "<policyId>_<ruleId>";
                // a key without '_' would throw ArrayIndexOutOfBoundsException on split[1].
                // Confirm the service never emits other x-ms-or-* keys in this case.
                String[] split = entry.getKey().split("_");
                String policyId = split[0];
                String ruleId = split[1];
                ObjectReplicationRule rule =
                    new ObjectReplicationRule(ruleId, ObjectReplicationStatus.fromString(entry.getValue()));
                if (!internalSourcePolicies.containsKey(policyId)) {
                    internalSourcePolicies.put(policyId, new ArrayList<>());
                }
                internalSourcePolicies.get(policyId).add(rule);
            }
        }
        // Group the collected rules by policy id into the public model type.
        this.objectReplicationSourcePolicies = new ArrayList<>();
        for (Map.Entry<String, List<ObjectReplicationRule>> entry : internalSourcePolicies.entrySet()) {
            this.objectReplicationSourcePolicies.add(new ObjectReplicationPolicy(entry.getKey(), entry.getValue()));
        }
    }

    /*
     * Returns the date and time the container was last modified. Any operation
     * that modifies the blob, including an update of the blob's metadata or
     * properties, changes the last-modified time of the blob.
     */
    @JsonProperty(value = "Last-Modified")
    private OffsetDateTime lastModified;

    /*
     * The metadata property.
     */
    @HeaderCollection("x-ms-meta-")
    private Map<String, String> metadata;

    /*
     * Optional.
Only valid when Object Replication is enabled for the storage
     * container and on the destination blob of the replication.
     */
    @JsonProperty(value = "x-ms-or-policy-id")
    private String objectReplicationDestinationPolicyId;

    /*
     * The objectReplicationRuleStatus property. Populated from the "x-ms-or-"
     * header collection; one policy per source policy id.
     */
    @HeaderCollection("x-ms-or-")
    private List<ObjectReplicationPolicy> objectReplicationSourcePolicies;

    /*
     * The number of bytes present in the response body.
     */
    @JsonProperty(value = "Content-Length")
    private Long contentLength;

    /*
     * The media type of the body of the response. For Download Blob this is
     * 'application/octet-stream'
     */
    @JsonProperty(value = "Content-Type")
    private String contentType;

    /*
     * Indicates the range of bytes returned in the event that the client
     * requested a subset of the blob by setting the 'Range' request header.
     */
    @JsonProperty(value = "Content-Range")
    private String contentRange;

    /*
     * The ETag contains a value that you can use to perform operations
     * conditionally. If the request version is 2011-08-18 or newer, the ETag
     * value will be in quotes.
     */
    @JsonProperty(value = "ETag")
    private String eTag;

    /*
     * If the blob has an MD5 hash and this operation is to read the full blob,
     * this response header is returned so that the client can check for
     * message content integrity.
     */
    @JsonProperty(value = "Content-MD5")
    private byte[] contentMd5;

    /*
     * This header returns the value that was specified for the
     * Content-Encoding request header
     */
    @JsonProperty(value = "Content-Encoding")
    private String contentEncoding;

    /*
     * This header is returned if it was previously specified for the blob.
     */
    @JsonProperty(value = "Cache-Control")
    private String cacheControl;

    /*
     * This header returns the value that was specified for the
     * 'x-ms-blob-content-disposition' header. The Content-Disposition response
     * header field conveys additional information about how to process the
     * response payload, and also can be used to attach additional metadata.
     * For example, if set to attachment, it indicates that the user-agent
     * should not display the response, but instead show a Save As dialog with
     * a filename other than the blob name specified.
     */
    @JsonProperty(value = "Content-Disposition")
    private String contentDisposition;

    /*
     * This header returns the value that was specified for the
     * Content-Language request header.
     */
    @JsonProperty(value = "Content-Language")
    private String contentLanguage;

    /*
     * The current sequence number for a page blob. This header is not returned
     * for block blobs or append blobs
     */
    @JsonProperty(value = "x-ms-blob-sequence-number")
    private Long blobSequenceNumber;

    /*
     * The blob's type. Possible values include: 'BlockBlob', 'PageBlob',
     * 'AppendBlob'
     */
    @JsonProperty(value = "x-ms-blob-type")
    private BlobType blobType;

    /*
     * Conclusion time of the last attempted Copy Blob operation where this
     * blob was the destination blob. This value can specify the time of a
     * completed, aborted, or failed copy attempt. This header does not appear
     * if a copy is pending, if this blob has never been the destination in a
     * Copy Blob operation, or if this blob has been modified after a concluded
     * Copy Blob operation using Set Blob Properties, Put Blob, or Put Block
     * List.
     */
    @JsonProperty(value = "x-ms-copy-completion-time")
    private OffsetDateTime copyCompletionTime;

    /*
     * Only appears when x-ms-copy-status is failed or pending. Describes the
     * cause of the last fatal or non-fatal copy operation failure. This header
     * does not appear if this blob has never been the destination in a Copy
     * Blob operation, or if this blob has been modified after a concluded Copy
     * Blob operation using Set Blob Properties, Put Blob, or Put Block List
     */
    @JsonProperty(value = "x-ms-copy-status-description")
    private String copyStatusDescription;

    /*
     * String identifier for this copy operation. Use with Get Blob Properties
     * to check the status of this copy operation, or pass to Abort Copy Blob
     * to abort a pending copy.
     */
    @JsonProperty(value = "x-ms-copy-id")
    private String copyId;

    /*
     * Contains the number of bytes copied and the total bytes in the source in
     * the last attempted Copy Blob operation where this blob was the
     * destination blob. Can show between 0 and Content-Length bytes copied.
     * This header does not appear if this blob has never been the destination
     * in a Copy Blob operation, or if this blob has been modified after a
     * concluded Copy Blob operation using Set Blob Properties, Put Blob, or
     * Put Block List
     */
    @JsonProperty(value = "x-ms-copy-progress")
    private String copyProgress;

    /*
     * URL up to 2 KB in length that specifies the source blob or file used in
     * the last attempted Copy Blob operation where this blob was the
     * destination blob. This header does not appear if this blob has never
     * been the destination in a Copy Blob operation, or if this blob has been
     * modified after a concluded Copy Blob operation using Set Blob
     * Properties, Put Blob, or Put Block List.
     */
    @JsonProperty(value = "x-ms-copy-source")
    private String copySource;

    /*
     * State of the copy operation identified by x-ms-copy-id. Possible values
     * include: 'pending', 'success', 'aborted', 'failed'
     */
    @JsonProperty(value = "x-ms-copy-status")
    private CopyStatusType copyStatus;

    /*
     * When a blob is leased, specifies whether the lease is of infinite or
     * fixed duration. Possible values include: 'infinite', 'fixed'
     */
    @JsonProperty(value = "x-ms-lease-duration")
    private LeaseDurationType leaseDuration;

    /*
     * Lease state of the blob. Possible values include: 'available', 'leased',
     * 'expired', 'breaking', 'broken'
     */
    @JsonProperty(value = "x-ms-lease-state")
    private LeaseStateType leaseState;

    /*
     * The current lease status of the blob. Possible values include: 'locked',
     * 'unlocked'
     */
    @JsonProperty(value = "x-ms-lease-status")
    private LeaseStatusType leaseStatus;

    /*
     * If a client request id header is sent in the request, this header will
     * be present in the response with the same value.
     */
    @JsonProperty(value = "x-ms-client-request-id")
    private String clientRequestId;

    /*
     * This header uniquely identifies the request that was made and can be
     * used for troubleshooting the request.
     */
    @JsonProperty(value = "x-ms-request-id")
    private String requestId;

    /*
     * Indicates the version of the Blob service used to execute the request.
     * This header is returned for requests made against version 2009-09-19 and
     * above.
     */
    @JsonProperty(value = "x-ms-version")
    private String version;

    /*
     * A DateTime value returned by the service that uniquely identifies the
     * blob. The value of this header indicates the blob version, and may be
     * used in subsequent requests to access this version of the blob.
     */
    @JsonProperty(value = "x-ms-version-id")
    private String versionId;

    /*
     * Indicates that the service supports requests for partial blob content.
     */
    @JsonProperty(value = "Accept-Ranges")
    private String acceptRanges;

    /*
     * UTC date/time value generated by the service that indicates the time at
     * which the response was initiated
     */
    @JsonProperty(value = "Date")
    private OffsetDateTime dateProperty;

    /*
     * The number of committed blocks present in the blob. This header is
     * returned only for append blobs.
     */
    @JsonProperty(value = "x-ms-blob-committed-block-count")
    private Integer blobCommittedBlockCount;

    /*
     * The value of this header is set to true if the blob data and application
     * metadata are completely encrypted using the specified algorithm.
     * Otherwise, the value is set to false (when the blob is unencrypted, or
     * if only parts of the blob/application metadata are encrypted).
     */
    @JsonProperty(value = "x-ms-server-encrypted")
    private Boolean isServerEncrypted;

    /*
     * The SHA-256 hash of the encryption key used to encrypt the blob. This
     * header is only returned when the blob was encrypted with a
     * customer-provided key.
     */
    @JsonProperty(value = "x-ms-encryption-key-sha256")
    private String encryptionKeySha256;

    /*
     * Returns the name of the encryption scope used to encrypt the blob
     * contents and application metadata. Note that the absence of this header
     * implies use of the default account encryption scope.
     */
    @JsonProperty(value = "x-ms-encryption-scope")
    private String encryptionScope;

    /*
     * If the blob has a MD5 hash, and if request contains range header (Range
     * or x-ms-range), this response header is returned with the value of the
     * whole blob's MD5 value. This value may or may not be equal to the value
     * returned in Content-MD5 header, with the latter calculated from the
     * requested range
     */
    @JsonProperty(value = "x-ms-blob-content-md5")
    private byte[] blobContentMD5;

    /*
     * The number of tags associated with the blob
     */
    @JsonProperty(value = "x-ms-tag-count")
    private Long tagCount;

    /*
     * If the request is to read a specified range and the
     * x-ms-range-get-content-crc64 is set to true, then the request returns a
     * crc64 for the range, as long as the range size is less than or equal to
     * 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5
     * is specified in the same request, it will fail with 400(Bad Request)
     */
    @JsonProperty(value = "x-ms-content-crc64")
    private byte[] contentCrc64;

    /*
     * The errorCode property.
     */
    @JsonProperty(value = "x-ms-error-code")
    private String errorCode;

    /**
     * Get the lastModified property: Returns the date and time the container
     * was last modified. Any operation that modifies the blob, including an
     * update of the blob's metadata or properties, changes the last-modified
     * time of the blob.
     *
     * @return the lastModified value.
     */
    public OffsetDateTime getLastModified() {
        return this.lastModified;
    }

    /**
     * Set the lastModified property: Returns the date and time the container
     * was last modified. Any operation that modifies the blob, including an
     * update of the blob's metadata or properties, changes the last-modified
     * time of the blob.
* * @param lastModified the lastModified value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLastModified(OffsetDateTime lastModified) { this.lastModified = lastModified; return this; } /** * Get the metadata property: The metadata property. * * @return the metadata value. */ public Map<String, String> getMetadata() { return this.metadata; } /** * Set the metadata property: The metadata property. * * @param metadata the metadata value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setMetadata(Map<String, String> metadata) { this.metadata = metadata; return this; } /** * Get the objectReplicationDestinationPolicyId property: Optional. Only * valid when Object Replication is enabled for the storage container and * on the destination blob of the replication. * * @return the objectReplicationDestinationPolicyId value. */ public String getObjectReplicationDestinationPolicyId() { return this.objectReplicationDestinationPolicyId; } /** * Set the objectReplicationDestinationPolicyId property: Optional. Only * valid when Object Replication is enabled for the storage container and * on the destination blob of the replication. * * @param objectReplicationDestinationPolicyId the * objectReplicationDestinationPolicyId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationDestinationPolicyId(String objectReplicationDestinationPolicyId) { this.objectReplicationDestinationPolicyId = objectReplicationDestinationPolicyId; return this; } /** * Get the objectReplicationSourcePolicies property: The * objectReplicationSourcePolicies property. * * @return the objectReplicationSourcePolicies value. 
*/ public List<ObjectReplicationPolicy> getObjectReplicationSourcePolicies() { return Collections.unmodifiableList(this.objectReplicationSourcePolicies); } /** * Set the objectReplicationSourcePolicies property: The * objectReplicationSourcePolicies property. * * @param objectReplicationSourcePolicies the objectReplicationSourcePolicies value * to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationSourcePolicies( List<ObjectReplicationPolicy> objectReplicationSourcePolicies) { this.objectReplicationSourcePolicies = objectReplicationSourcePolicies; return this; } /** * Get the contentLength property: The number of bytes present in the * response body. * * @return the contentLength value. */ public Long getContentLength() { return this.contentLength; } /** * Set the contentLength property: The number of bytes present in the * response body. * * @param contentLength the contentLength value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLength(Long contentLength) { this.contentLength = contentLength; return this; } /** * Get the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @return the contentType value. */ public String getContentType() { return this.contentType; } /** * Set the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @param contentType the contentType value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentType(String contentType) { this.contentType = contentType; return this; } /** * Get the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @return the contentRange value. 
*/ public String getContentRange() { return this.contentRange; } /** * Set the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @param contentRange the contentRange value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentRange(String contentRange) { this.contentRange = contentRange; return this; } /** * Get the eTag property: The ETag contains a value that you can use to * perform operations conditionally. If the request version is 2011-08-18 * or newer, the ETag value will be in quotes. * * @return the eTag value. */ public String getETag() { return this.eTag; } /** * Set the eTag property: The ETag contains a value that you can use to * perform operations conditionally. If the request version is 2011-08-18 * or newer, the ETag value will be in quotes. * * @param eTag the eTag value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setETag(String eTag) { this.eTag = eTag; return this; } /** * Get the contentMd5 property: If the blob has an MD5 hash and this * operation is to read the full blob, this response header is returned so * that the client can check for message content integrity. * * @return the contentMd5 value. */ public byte[] getContentMd5() { return CoreUtils.clone(this.contentMd5); } /** * Set the contentMd5 property: If the blob has an MD5 hash and this * operation is to read the full blob, this response header is returned so * that the client can check for message content integrity. * * @param contentMd5 the contentMd5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentMd5(byte[] contentMd5) { this.contentMd5 = CoreUtils.clone(contentMd5); return this; } /** * Get the contentEncoding property: This header returns the value that was * specified for the Content-Encoding request header. 
* * @return the contentEncoding value. */ public String getContentEncoding() { return this.contentEncoding; } /** * Set the contentEncoding property: This header returns the value that was * specified for the Content-Encoding request header. * * @param contentEncoding the contentEncoding value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentEncoding(String contentEncoding) { this.contentEncoding = contentEncoding; return this; } /** * Get the cacheControl property: This header is returned if it was * previously specified for the blob. * * @return the cacheControl value. */ public String getCacheControl() { return this.cacheControl; } /** * Set the cacheControl property: This header is returned if it was * previously specified for the blob. * * @param cacheControl the cacheControl value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCacheControl(String cacheControl) { this.cacheControl = cacheControl; return this; } /** * Get the contentDisposition property: This header returns the value that * was specified for the 'x-ms-blob-content-disposition' header. The * Content-Disposition response header field conveys additional information * about how to process the response payload, and also can be used to * attach additional metadata. For example, if set to attachment, it * indicates that the user-agent should not display the response, but * instead show a Save As dialog with a filename other than the blob name * specified. * * @return the contentDisposition value. */ public String getContentDisposition() { return this.contentDisposition; } /** * Set the contentDisposition property: This header returns the value that * was specified for the 'x-ms-blob-content-disposition' header. The * Content-Disposition response header field conveys additional information * about how to process the response payload, and also can be used to * attach additional metadata. 
For example, if set to attachment, it * indicates that the user-agent should not display the response, but * instead show a Save As dialog with a filename other than the blob name * specified. * * @param contentDisposition the contentDisposition value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentDisposition(String contentDisposition) { this.contentDisposition = contentDisposition; return this; } /** * Get the contentLanguage property: This header returns the value that was * specified for the Content-Language request header. * * @return the contentLanguage value. */ public String getContentLanguage() { return this.contentLanguage; } /** * Set the contentLanguage property: This header returns the value that was * specified for the Content-Language request header. * * @param contentLanguage the contentLanguage value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLanguage(String contentLanguage) { this.contentLanguage = contentLanguage; return this; } /** * Get the blobSequenceNumber property: The current sequence number for a * page blob. This header is not returned for block blobs or append blobs. * * @return the blobSequenceNumber value. */ public Long getBlobSequenceNumber() { return this.blobSequenceNumber; } /** * Set the blobSequenceNumber property: The current sequence number for a * page blob. This header is not returned for block blobs or append blobs. * * @param blobSequenceNumber the blobSequenceNumber value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobSequenceNumber(Long blobSequenceNumber) { this.blobSequenceNumber = blobSequenceNumber; return this; } /** * Get the blobType property: The blob's type. Possible values include: * 'BlockBlob', 'PageBlob', 'AppendBlob'. * * @return the blobType value. */ public BlobType getBlobType() { return this.blobType; } /** * Set the blobType property: The blob's type. 
Possible values include: * 'BlockBlob', 'PageBlob', 'AppendBlob'. * * @param blobType the blobType value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobType(BlobType blobType) { this.blobType = blobType; return this; } /** * Get the copyCompletionTime property: Conclusion time of the last * attempted Copy Blob operation where this blob was the destination blob. * This value can specify the time of a completed, aborted, or failed copy * attempt. This header does not appear if a copy is pending, if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @return the copyCompletionTime value. */ public OffsetDateTime getCopyCompletionTime() { return this.copyCompletionTime; } /** * Set the copyCompletionTime property: Conclusion time of the last * attempted Copy Blob operation where this blob was the destination blob. * This value can specify the time of a completed, aborted, or failed copy * attempt. This header does not appear if a copy is pending, if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @param copyCompletionTime the copyCompletionTime value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyCompletionTime(OffsetDateTime copyCompletionTime) { this.copyCompletionTime = copyCompletionTime; return this; } /** * Get the copyStatusDescription property: Only appears when * x-ms-copy-status is failed or pending. Describes the cause of the last * fatal or non-fatal copy operation failure. 
This header does not appear * if this blob has never been the destination in a Copy Blob operation, or * if this blob has been modified after a concluded Copy Blob operation * using Set Blob Properties, Put Blob, or Put Block List. * * @return the copyStatusDescription value. */ public String getCopyStatusDescription() { return this.copyStatusDescription; } /** * Set the copyStatusDescription property: Only appears when * x-ms-copy-status is failed or pending. Describes the cause of the last * fatal or non-fatal copy operation failure. This header does not appear * if this blob has never been the destination in a Copy Blob operation, or * if this blob has been modified after a concluded Copy Blob operation * using Set Blob Properties, Put Blob, or Put Block List. * * @param copyStatusDescription the copyStatusDescription value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyStatusDescription(String copyStatusDescription) { this.copyStatusDescription = copyStatusDescription; return this; } /** * Get the copyId property: String identifier for this copy operation. Use * with Get Blob Properties to check the status of this copy operation, or * pass to Abort Copy Blob to abort a pending copy. * * @return the copyId value. */ public String getCopyId() { return this.copyId; } /** * Set the copyId property: String identifier for this copy operation. Use * with Get Blob Properties to check the status of this copy operation, or * pass to Abort Copy Blob to abort a pending copy. * * @param copyId the copyId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyId(String copyId) { this.copyId = copyId; return this; } /** * Get the copyProgress property: Contains the number of bytes copied and * the total bytes in the source in the last attempted Copy Blob operation * where this blob was the destination blob. Can show between 0 and * Content-Length bytes copied. 
This header does not appear if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @return the copyProgress value. */ public String getCopyProgress() { return this.copyProgress; } /** * Set the copyProgress property: Contains the number of bytes copied and * the total bytes in the source in the last attempted Copy Blob operation * where this blob was the destination blob. Can show between 0 and * Content-Length bytes copied. This header does not appear if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @param copyProgress the copyProgress value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyProgress(String copyProgress) { this.copyProgress = copyProgress; return this; } /** * Get the copySource property: URL up to 2 KB in length that specifies the * source blob or file used in the last attempted Copy Blob operation where * this blob was the destination blob. This header does not appear if this * blob has never been the destination in a Copy Blob operation, or if this * blob has been modified after a concluded Copy Blob operation using Set * Blob Properties, Put Blob, or Put Block List. * * @return the copySource value. */ public String getCopySource() { return this.copySource; } /** * Set the copySource property: URL up to 2 KB in length that specifies the * source blob or file used in the last attempted Copy Blob operation where * this blob was the destination blob. This header does not appear if this * blob has never been the destination in a Copy Blob operation, or if this * blob has been modified after a concluded Copy Blob operation using Set * Blob Properties, Put Blob, or Put Block List. 
* * @param copySource the copySource value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopySource(String copySource) { this.copySource = copySource; return this; } /** * Get the copyStatus property: State of the copy operation identified by * x-ms-copy-id. Possible values include: 'pending', 'success', 'aborted', * 'failed'. * * @return the copyStatus value. */ public CopyStatusType getCopyStatus() { return this.copyStatus; } /** * Set the copyStatus property: State of the copy operation identified by * x-ms-copy-id. Possible values include: 'pending', 'success', 'aborted', * 'failed'. * * @param copyStatus the copyStatus value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyStatus(CopyStatusType copyStatus) { this.copyStatus = copyStatus; return this; } /** * Get the leaseDuration property: When a blob is leased, specifies whether * the lease is of infinite or fixed duration. Possible values include: * 'infinite', 'fixed'. * * @return the leaseDuration value. */ public LeaseDurationType getLeaseDuration() { return this.leaseDuration; } /** * Set the leaseDuration property: When a blob is leased, specifies whether * the lease is of infinite or fixed duration. Possible values include: * 'infinite', 'fixed'. * * @param leaseDuration the leaseDuration value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseDuration(LeaseDurationType leaseDuration) { this.leaseDuration = leaseDuration; return this; } /** * Get the leaseState property: Lease state of the blob. Possible values * include: 'available', 'leased', 'expired', 'breaking', 'broken'. * * @return the leaseState value. */ public LeaseStateType getLeaseState() { return this.leaseState; } /** * Set the leaseState property: Lease state of the blob. Possible values * include: 'available', 'leased', 'expired', 'breaking', 'broken'. * * @param leaseState the leaseState value to set. 
* @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseState(LeaseStateType leaseState) { this.leaseState = leaseState; return this; } /** * Get the leaseStatus property: The current lease status of the blob. * Possible values include: 'locked', 'unlocked'. * * @return the leaseStatus value. */ public LeaseStatusType getLeaseStatus() { return this.leaseStatus; } /** * Set the leaseStatus property: The current lease status of the blob. * Possible values include: 'locked', 'unlocked'. * * @param leaseStatus the leaseStatus value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseStatus(LeaseStatusType leaseStatus) { this.leaseStatus = leaseStatus; return this; } /** * Get the clientRequestId property: If a client request id header is sent * in the request, this header will be present in the response with the * same value. * * @return the clientRequestId value. */ public String getClientRequestId() { return this.clientRequestId; } /** * Set the clientRequestId property: If a client request id header is sent * in the request, this header will be present in the response with the * same value. * * @param clientRequestId the clientRequestId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setClientRequestId(String clientRequestId) { this.clientRequestId = clientRequestId; return this; } /** * Get the requestId property: This header uniquely identifies the request * that was made and can be used for troubleshooting the request. * * @return the requestId value. */ public String getRequestId() { return this.requestId; } /** * Set the requestId property: This header uniquely identifies the request * that was made and can be used for troubleshooting the request. * * @param requestId the requestId value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setRequestId(String requestId) { this.requestId = requestId; return this; } /** * Get the version property: Indicates the version of the Blob service used * to execute the request. This header is returned for requests made * against version 2009-09-19 and above. * * @return the version value. */ public String getVersion() { return this.version; } /** * Set the version property: Indicates the version of the Blob service used * to execute the request. This header is returned for requests made * against version 2009-09-19 and above. * * @param version the version value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setVersion(String version) { this.version = version; return this; } /** * Get the versionId property: A DateTime value returned by the service * that uniquely identifies the blob. The value of this header indicates * the blob version, and may be used in subsequent requests to access this * version of the blob. * * @return the versionId value. */ public String getVersionId() { return this.versionId; } /** * Set the versionId property: A DateTime value returned by the service * that uniquely identifies the blob. The value of this header indicates * the blob version, and may be used in subsequent requests to access this * version of the blob. * * @param versionId the versionId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setVersionId(String versionId) { this.versionId = versionId; return this; } /** * Get the acceptRanges property: Indicates that the service supports * requests for partial blob content. * * @return the acceptRanges value. */ public String getAcceptRanges() { return this.acceptRanges; } /** * Set the acceptRanges property: Indicates that the service supports * requests for partial blob content. * * @param acceptRanges the acceptRanges value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setAcceptRanges(String acceptRanges) { this.acceptRanges = acceptRanges; return this; } /** * Get the dateProperty property: UTC date/time value generated by the * service that indicates the time at which the response was initiated. * * @return the dateProperty value. */ public OffsetDateTime getDateProperty() { return this.dateProperty; } /** * Set the dateProperty property: UTC date/time value generated by the * service that indicates the time at which the response was initiated. * * @param dateProperty the dateProperty value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setDateProperty(OffsetDateTime dateProperty) { this.dateProperty = dateProperty; return this; } /** * Get the blobCommittedBlockCount property: The number of committed blocks * present in the blob. This header is returned only for append blobs. * * @return the blobCommittedBlockCount value. */ public Integer getBlobCommittedBlockCount() { return this.blobCommittedBlockCount; } /** * Set the blobCommittedBlockCount property: The number of committed blocks * present in the blob. This header is returned only for append blobs. * * @param blobCommittedBlockCount the blobCommittedBlockCount value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobCommittedBlockCount(Integer blobCommittedBlockCount) { this.blobCommittedBlockCount = blobCommittedBlockCount; return this; } /** * Get the isServerEncrypted property: The value of this header is set to * true if the blob data and application metadata are completely encrypted * using the specified algorithm. Otherwise, the value is set to false * (when the blob is unencrypted, or if only parts of the blob/application * metadata are encrypted). * * @return the isServerEncrypted value. 
*/ public Boolean isServerEncrypted() { return this.isServerEncrypted; } /** * Set the isServerEncrypted property: The value of this header is set to * true if the blob data and application metadata are completely encrypted * using the specified algorithm. Otherwise, the value is set to false * (when the blob is unencrypted, or if only parts of the blob/application * metadata are encrypted). * * @param isServerEncrypted the isServerEncrypted value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setIsServerEncrypted(Boolean isServerEncrypted) { this.isServerEncrypted = isServerEncrypted; return this; } /** * Get the encryptionKeySha256 property: The SHA-256 hash of the encryption * key used to encrypt the blob. This header is only returned when the blob * was encrypted with a customer-provided key. * * @return the encryptionKeySha256 value. */ public String getEncryptionKeySha256() { return this.encryptionKeySha256; } /** * Set the encryptionKeySha256 property: The SHA-256 hash of the encryption * key used to encrypt the blob. This header is only returned when the blob * was encrypted with a customer-provided key. * * @param encryptionKeySha256 the encryptionKeySha256 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionKeySha256(String encryptionKeySha256) { this.encryptionKeySha256 = encryptionKeySha256; return this; } /** * Get the encryptionScope property: Returns the name of the encryption * scope used to encrypt the blob contents and application metadata. Note * that the absence of this header implies use of the default account * encryption scope. * * @return the encryptionScope value. */ public String getEncryptionScope() { return this.encryptionScope; } /** * Set the encryptionScope property: Returns the name of the encryption * scope used to encrypt the blob contents and application metadata. 
Note * that the absence of this header implies use of the default account * encryption scope. * * @param encryptionScope the encryptionScope value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionScope(String encryptionScope) { this.encryptionScope = encryptionScope; return this; } /** * Get the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @return the blobContentMD5 value. */ public byte[] getBlobContentMD5() { return CoreUtils.clone(this.blobContentMD5); } /** * Set the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @param blobContentMD5 the blobContentMD5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobContentMD5(byte[] blobContentMD5) { this.blobContentMD5 = CoreUtils.clone(blobContentMD5); return this; } /** * Get the tagCount property: The number of tags associated with the blob. * * @return the tagCount value. */ public Long getTagCount() { return this.tagCount; } /** * Set the tagCount property: The number of tags associated with the blob. * * @param tagCount the tagCount value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setTagCount(Long tagCount) { this.tagCount = tagCount; return this; } /** * Get the contentCrc64 property: If the request is to read a specified * range and the x-ms-range-get-content-crc64 is set to true, then the * request returns a crc64 for the range, as long as the range size is less * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp; * x-ms-range-get-content-md5 is specified in the same request, it will * fail with 400(Bad Request). * * @return the contentCrc64 value. */ public byte[] getContentCrc64() { return CoreUtils.clone(this.contentCrc64); } /** * Set the contentCrc64 property: If the request is to read a specified * range and the x-ms-range-get-content-crc64 is set to true, then the * request returns a crc64 for the range, as long as the range size is less * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp; * x-ms-range-get-content-md5 is specified in the same request, it will * fail with 400(Bad Request). * * @param contentCrc64 the contentCrc64 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentCrc64(byte[] contentCrc64) { this.contentCrc64 = CoreUtils.clone(contentCrc64); return this; } /** * Get the errorCode property: The errorCode property. * * @return the errorCode value. */ public String getErrorCode() { return this.errorCode; } /** * Set the errorCode property: The errorCode property. * * @param errorCode the errorCode value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setErrorCode(String errorCode) { this.errorCode = errorCode; return this; } }
Consider renaming to `objectReplicationSourceProperties`: a separate variable (`objectReplicationDestinationPolicyId`) already holds the destination policy header when the object-replication info of a destination blob is returned.
// NOTE(review): the three fragments below appear to be alternate revisions of the
// same no-arg constructor (before/after renaming the field to
// objectReplicationSourcePolicies); a single class cannot keep both constructors
// and the bare statement — TODO confirm which revision belongs in this file.
public BlobDownloadHeaders() { objectReplicationPolicies = null; }
objectReplicationPolicies = null;
public BlobDownloadHeaders() { objectReplicationSourcePolicies = null; }
class BlobDownloadHeaders { /** * Instantiates an empty {@code BlobDownloadHeaders}. */ /** * Instantiates a {@code BlobDownloadHeaders} object based on the generated, internal version of the type. * @param headers The generated headers type from which to extract values. */ public BlobDownloadHeaders(com.azure.storage.blob.implementation.models.BlobDownloadHeaders headers) { /* We have these two types because we needed to update this interface in a way that could not be generated (getObjectReplicationSourcePolicies), so we switched to generating BlobDownloadHeaders into implementation and wrapping it. Because it's headers type, we couldn't change the name of the generated type. */ this.lastModified = headers.getLastModified(); this.metadata = headers.getMetadata(); this.eTag = headers.getETag(); this.contentLength = headers.getContentLength(); this.contentType = headers.getContentType(); this.contentRange = headers.getContentRange(); this.contentEncoding = headers.getContentEncoding(); this.contentLanguage = headers.getContentLanguage(); this.contentMd5 = headers.getContentMd5(); this.contentDisposition = headers.getContentDisposition(); this.cacheControl = headers.getCacheControl(); this.blobSequenceNumber = headers.getBlobSequenceNumber(); this.blobType = headers.getBlobType(); this.leaseStatus = headers.getLeaseStatus(); this.leaseState = headers.getLeaseState(); this.leaseDuration = headers.getLeaseDuration(); this.copyId = headers.getCopyId(); this.copyStatus = headers.getCopyStatus(); this.copySource = headers.getCopySource(); this.copyProgress = headers.getCopyProgress(); this.copyCompletionTime = headers.getCopyCompletionTime(); this.copyStatusDescription = headers.getCopyStatusDescription(); this.isServerEncrypted = headers.isServerEncrypted(); this.clientRequestId = headers.getClientRequestId(); this.requestId = headers.getRequestId(); this.version = headers.getVersion(); this.versionId = headers.getVersionId(); this.acceptRanges = 
headers.getAcceptRanges(); this.dateProperty = headers.getDateProperty(); this.blobCommittedBlockCount = headers.getBlobCommittedBlockCount(); this.encryptionKeySha256 = headers.getEncryptionKeySha256(); this.encryptionScope = headers.getEncryptionScope(); this.blobContentMD5 = headers.getBlobContentMD5(); this.contentCrc64 = headers.getContentCrc64(); this.errorCode = headers.getErrorCode(); this.tagCount = headers.getTagCount(); Map<String, String> objectReplicationStatus = headers.getObjectReplicationRules(); this.objectReplicationPolicies = new ArrayList<>(); objectReplicationStatus = objectReplicationStatus == null ? new HashMap<>() : objectReplicationStatus; this.objectReplicationDestinationPolicyId = objectReplicationStatus.getOrDefault("policy-id", null); if (this.objectReplicationDestinationPolicyId == null) { for (Map.Entry<String, String> entry : objectReplicationStatus.entrySet()) { String[] split = entry.getKey().split("_"); String policyId = split[0]; String ruleId = split[1]; ObjectReplicationRule rule = new ObjectReplicationRule(ruleId, ObjectReplicationStatus.fromString(entry.getValue())); int index = ObjectReplicationPolicy.getIndexOfObjectReplicationPolicy(policyId, this.objectReplicationPolicies); if (index == -1) { ObjectReplicationPolicy policy = new ObjectReplicationPolicy(policyId); policy.putRule(rule); this.objectReplicationPolicies.add(policy); } else { ObjectReplicationPolicy policy = objectReplicationPolicies.get(index); policy.putRule(rule); } } } } /* * Returns the date and time the container was last modified. Any operation * that modifies the blob, including an update of the blob's metadata or * properties, changes the last-modified time of the blob. */ @JsonProperty(value = "Last-Modified") private OffsetDateTime lastModified; /* * The metadata property. */ @HeaderCollection("x-ms-meta-") private Map<String, String> metadata; /* * Optional. 
Only valid when Object Replication is enabled for the storage * container and on the destination blob of the replication. */ @JsonProperty(value = "x-ms-or-policy-id") private String objectReplicationDestinationPolicyId; /* * The objectReplicationRuleStatus property. */ @HeaderCollection("x-ms-or-") private List<ObjectReplicationPolicy> objectReplicationPolicies; /* * The number of bytes present in the response body. */ @JsonProperty(value = "Content-Length") private Long contentLength; /* * The media type of the body of the response. For Download Blob this is * 'application/octet-stream' */ @JsonProperty(value = "Content-Type") private String contentType; /* * Indicates the range of bytes returned in the event that the client * requested a subset of the blob by setting the 'Range' request header. */ @JsonProperty(value = "Content-Range") private String contentRange; /* * The ETag contains a value that you can use to perform operations * conditionally. If the request version is 2011-08-18 or newer, the ETag * value will be in quotes. */ @JsonProperty(value = "ETag") private String eTag; /* * If the blob has an MD5 hash and this operation is to read the full blob, * this response header is returned so that the client can check for * message content integrity. */ @JsonProperty(value = "Content-MD5") private byte[] contentMd5; /* * This header returns the value that was specified for the * Content-Encoding request header */ @JsonProperty(value = "Content-Encoding") private String contentEncoding; /* * This header is returned if it was previously specified for the blob. */ @JsonProperty(value = "Cache-Control") private String cacheControl; /* * This header returns the value that was specified for the * 'x-ms-blob-content-disposition' header. The Content-Disposition response * header field conveys additional information about how to process the * response payload, and also can be used to attach additional metadata. 
* For example, if set to attachment, it indicates that the user-agent * should not display the response, but instead show a Save As dialog with * a filename other than the blob name specified. */ @JsonProperty(value = "Content-Disposition") private String contentDisposition; /* * This header returns the value that was specified for the * Content-Language request header. */ @JsonProperty(value = "Content-Language") private String contentLanguage; /* * The current sequence number for a page blob. This header is not returned * for block blobs or append blobs */ @JsonProperty(value = "x-ms-blob-sequence-number") private Long blobSequenceNumber; /* * The blob's type. Possible values include: 'BlockBlob', 'PageBlob', * 'AppendBlob' */ @JsonProperty(value = "x-ms-blob-type") private BlobType blobType; /* * Conclusion time of the last attempted Copy Blob operation where this * blob was the destination blob. This value can specify the time of a * completed, aborted, or failed copy attempt. This header does not appear * if a copy is pending, if this blob has never been the destination in a * Copy Blob operation, or if this blob has been modified after a concluded * Copy Blob operation using Set Blob Properties, Put Blob, or Put Block * List. */ @JsonProperty(value = "x-ms-copy-completion-time") private OffsetDateTime copyCompletionTime; /* * Only appears when x-ms-copy-status is failed or pending. Describes the * cause of the last fatal or non-fatal copy operation failure. This header * does not appear if this blob has never been the destination in a Copy * Blob operation, or if this blob has been modified after a concluded Copy * Blob operation using Set Blob Properties, Put Blob, or Put Block List */ @JsonProperty(value = "x-ms-copy-status-description") private String copyStatusDescription; /* * String identifier for this copy operation. Use with Get Blob Properties * to check the status of this copy operation, or pass to Abort Copy Blob * to abort a pending copy. 
*/ @JsonProperty(value = "x-ms-copy-id") private String copyId; /* * Contains the number of bytes copied and the total bytes in the source in * the last attempted Copy Blob operation where this blob was the * destination blob. Can show between 0 and Content-Length bytes copied. * This header does not appear if this blob has never been the destination * in a Copy Blob operation, or if this blob has been modified after a * concluded Copy Blob operation using Set Blob Properties, Put Blob, or * Put Block List */ @JsonProperty(value = "x-ms-copy-progress") private String copyProgress; /* * URL up to 2 KB in length that specifies the source blob or file used in * the last attempted Copy Blob operation where this blob was the * destination blob. This header does not appear if this blob has never * been the destination in a Copy Blob operation, or if this blob has been * modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. */ @JsonProperty(value = "x-ms-copy-source") private String copySource; /* * State of the copy operation identified by x-ms-copy-id. Possible values * include: 'pending', 'success', 'aborted', 'failed' */ @JsonProperty(value = "x-ms-copy-status") private CopyStatusType copyStatus; /* * When a blob is leased, specifies whether the lease is of infinite or * fixed duration. Possible values include: 'infinite', 'fixed' */ @JsonProperty(value = "x-ms-lease-duration") private LeaseDurationType leaseDuration; /* * Lease state of the blob. Possible values include: 'available', 'leased', * 'expired', 'breaking', 'broken' */ @JsonProperty(value = "x-ms-lease-state") private LeaseStateType leaseState; /* * The current lease status of the blob. Possible values include: 'locked', * 'unlocked' */ @JsonProperty(value = "x-ms-lease-status") private LeaseStatusType leaseStatus; /* * If a client request id header is sent in the request, this header will * be present in the response with the same value. 
*/ @JsonProperty(value = "x-ms-client-request-id") private String clientRequestId; /* * This header uniquely identifies the request that was made and can be * used for troubleshooting the request. */ @JsonProperty(value = "x-ms-request-id") private String requestId; /* * Indicates the version of the Blob service used to execute the request. * This header is returned for requests made against version 2009-09-19 and * above. */ @JsonProperty(value = "x-ms-version") private String version; /* * A DateTime value returned by the service that uniquely identifies the * blob. The value of this header indicates the blob version, and may be * used in subsequent requests to access this version of the blob. */ @JsonProperty(value = "x-ms-version-id") private String versionId; /* * Indicates that the service supports requests for partial blob content. */ @JsonProperty(value = "Accept-Ranges") private String acceptRanges; /* * UTC date/time value generated by the service that indicates the time at * which the response was initiated */ @JsonProperty(value = "Date") private OffsetDateTime dateProperty; /* * The number of committed blocks present in the blob. This header is * returned only for append blobs. */ @JsonProperty(value = "x-ms-blob-committed-block-count") private Integer blobCommittedBlockCount; /* * The value of this header is set to true if the blob data and application * metadata are completely encrypted using the specified algorithm. * Otherwise, the value is set to false (when the blob is unencrypted, or * if only parts of the blob/application metadata are encrypted). */ @JsonProperty(value = "x-ms-server-encrypted") private Boolean isServerEncrypted; /* * The SHA-256 hash of the encryption key used to encrypt the blob. This * header is only returned when the blob was encrypted with a * customer-provided key. 
*/ @JsonProperty(value = "x-ms-encryption-key-sha256") private String encryptionKeySha256; /* * Returns the name of the encryption scope used to encrypt the blob * contents and application metadata. Note that the absence of this header * implies use of the default account encryption scope. */ @JsonProperty(value = "x-ms-encryption-scope") private String encryptionScope; /* * If the blob has a MD5 hash, and if request contains range header (Range * or x-ms-range), this response header is returned with the value of the * whole blob's MD5 value. This value may or may not be equal to the value * returned in Content-MD5 header, with the latter calculated from the * requested range */ @JsonProperty(value = "x-ms-blob-content-md5") private byte[] blobContentMD5; /* * The number of tags associated with the blob */ @JsonProperty(value = "x-ms-tag-count") private Long tagCount; /* * If the request is to read a specified range and the * x-ms-range-get-content-crc64 is set to true, then the request returns a * crc64 for the range, as long as the range size is less than or equal to * 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 * is specified in the same request, it will fail with 400(Bad Request) */ @JsonProperty(value = "x-ms-content-crc64") private byte[] contentCrc64; /* * The errorCode property. */ @JsonProperty(value = "x-ms-error-code") private String errorCode; /** * Get the lastModified property: Returns the date and time the container * was last modified. Any operation that modifies the blob, including an * update of the blob's metadata or properties, changes the last-modified * time of the blob. * * @return the lastModified value. */ public OffsetDateTime getLastModified() { return this.lastModified; } /** * Set the lastModified property: Returns the date and time the container * was last modified. Any operation that modifies the blob, including an * update of the blob's metadata or properties, changes the last-modified * time of the blob. 
* * @param lastModified the lastModified value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLastModified(OffsetDateTime lastModified) { this.lastModified = lastModified; return this; } /** * Get the metadata property: The metadata property. * * @return the metadata value. */ public Map<String, String> getMetadata() { return this.metadata; } /** * Set the metadata property: The metadata property. * * @param metadata the metadata value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setMetadata(Map<String, String> metadata) { this.metadata = metadata; return this; } /** * Get the objectReplicationDestinationPolicyId property: Optional. Only * valid when Object Replication is enabled for the storage container and * on the destination blob of the replication. * * @return the objectReplicationDestinationPolicyId value. */ public String getObjectReplicationDestinationPolicyId() { return this.objectReplicationDestinationPolicyId; } /** * Set the objectReplicationDestinationPolicyId property: Optional. Only * valid when Object Replication is enabled for the storage container and * on the destination blob of the replication. * * @param objectReplicationDestinationPolicyId the * objectReplicationDestinationPolicyId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationDestinationPolicyId(String objectReplicationDestinationPolicyId) { this.objectReplicationDestinationPolicyId = objectReplicationDestinationPolicyId; return this; } /** * Get the objectReplicationPolicies property: The * objectReplicationPolicies property. * * @return the objectReplicationPolicies value. */ public List<ObjectReplicationPolicy> getObjectReplicationPolicies() { return this.objectReplicationPolicies; } /** * Set the objectReplicationPolicies property: The * objectReplicationPolicies property. 
* * @param objectReplicationPolicies the objectReplicationPolicies value * to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationPolicies(List<ObjectReplicationPolicy> objectReplicationPolicies) { this.objectReplicationPolicies = objectReplicationPolicies; return this; } /** * Get the contentLength property: The number of bytes present in the * response body. * * @return the contentLength value. */ public Long getContentLength() { return this.contentLength; } /** * Set the contentLength property: The number of bytes present in the * response body. * * @param contentLength the contentLength value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLength(Long contentLength) { this.contentLength = contentLength; return this; } /** * Get the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @return the contentType value. */ public String getContentType() { return this.contentType; } /** * Set the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @param contentType the contentType value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentType(String contentType) { this.contentType = contentType; return this; } /** * Get the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @return the contentRange value. */ public String getContentRange() { return this.contentRange; } /** * Set the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @param contentRange the contentRange value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setContentRange(String contentRange) { this.contentRange = contentRange; return this; } /** * Get the eTag property: The ETag contains a value that you can use to * perform operations conditionally. If the request version is 2011-08-18 * or newer, the ETag value will be in quotes. * * @return the eTag value. */ public String getETag() { return this.eTag; } /** * Set the eTag property: The ETag contains a value that you can use to * perform operations conditionally. If the request version is 2011-08-18 * or newer, the ETag value will be in quotes. * * @param eTag the eTag value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setETag(String eTag) { this.eTag = eTag; return this; } /** * Get the contentMd5 property: If the blob has an MD5 hash and this * operation is to read the full blob, this response header is returned so * that the client can check for message content integrity. * * @return the contentMd5 value. */ public byte[] getContentMd5() { return CoreUtils.clone(this.contentMd5); } /** * Set the contentMd5 property: If the blob has an MD5 hash and this * operation is to read the full blob, this response header is returned so * that the client can check for message content integrity. * * @param contentMd5 the contentMd5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentMd5(byte[] contentMd5) { this.contentMd5 = CoreUtils.clone(contentMd5); return this; } /** * Get the contentEncoding property: This header returns the value that was * specified for the Content-Encoding request header. * * @return the contentEncoding value. */ public String getContentEncoding() { return this.contentEncoding; } /** * Set the contentEncoding property: This header returns the value that was * specified for the Content-Encoding request header. * * @param contentEncoding the contentEncoding value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setContentEncoding(String contentEncoding) { this.contentEncoding = contentEncoding; return this; } /** * Get the cacheControl property: This header is returned if it was * previously specified for the blob. * * @return the cacheControl value. */ public String getCacheControl() { return this.cacheControl; } /** * Set the cacheControl property: This header is returned if it was * previously specified for the blob. * * @param cacheControl the cacheControl value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCacheControl(String cacheControl) { this.cacheControl = cacheControl; return this; } /** * Get the contentDisposition property: This header returns the value that * was specified for the 'x-ms-blob-content-disposition' header. The * Content-Disposition response header field conveys additional information * about how to process the response payload, and also can be used to * attach additional metadata. For example, if set to attachment, it * indicates that the user-agent should not display the response, but * instead show a Save As dialog with a filename other than the blob name * specified. * * @return the contentDisposition value. */ public String getContentDisposition() { return this.contentDisposition; } /** * Set the contentDisposition property: This header returns the value that * was specified for the 'x-ms-blob-content-disposition' header. The * Content-Disposition response header field conveys additional information * about how to process the response payload, and also can be used to * attach additional metadata. For example, if set to attachment, it * indicates that the user-agent should not display the response, but * instead show a Save As dialog with a filename other than the blob name * specified. * * @param contentDisposition the contentDisposition value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setContentDisposition(String contentDisposition) { this.contentDisposition = contentDisposition; return this; } /** * Get the contentLanguage property: This header returns the value that was * specified for the Content-Language request header. * * @return the contentLanguage value. */ public String getContentLanguage() { return this.contentLanguage; } /** * Set the contentLanguage property: This header returns the value that was * specified for the Content-Language request header. * * @param contentLanguage the contentLanguage value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLanguage(String contentLanguage) { this.contentLanguage = contentLanguage; return this; } /** * Get the blobSequenceNumber property: The current sequence number for a * page blob. This header is not returned for block blobs or append blobs. * * @return the blobSequenceNumber value. */ public Long getBlobSequenceNumber() { return this.blobSequenceNumber; } /** * Set the blobSequenceNumber property: The current sequence number for a * page blob. This header is not returned for block blobs or append blobs. * * @param blobSequenceNumber the blobSequenceNumber value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobSequenceNumber(Long blobSequenceNumber) { this.blobSequenceNumber = blobSequenceNumber; return this; } /** * Get the blobType property: The blob's type. Possible values include: * 'BlockBlob', 'PageBlob', 'AppendBlob'. * * @return the blobType value. */ public BlobType getBlobType() { return this.blobType; } /** * Set the blobType property: The blob's type. Possible values include: * 'BlockBlob', 'PageBlob', 'AppendBlob'. * * @param blobType the blobType value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setBlobType(BlobType blobType) { this.blobType = blobType; return this; } /** * Get the copyCompletionTime property: Conclusion time of the last * attempted Copy Blob operation where this blob was the destination blob. * This value can specify the time of a completed, aborted, or failed copy * attempt. This header does not appear if a copy is pending, if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @return the copyCompletionTime value. */ public OffsetDateTime getCopyCompletionTime() { return this.copyCompletionTime; } /** * Set the copyCompletionTime property: Conclusion time of the last * attempted Copy Blob operation where this blob was the destination blob. * This value can specify the time of a completed, aborted, or failed copy * attempt. This header does not appear if a copy is pending, if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @param copyCompletionTime the copyCompletionTime value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyCompletionTime(OffsetDateTime copyCompletionTime) { this.copyCompletionTime = copyCompletionTime; return this; } /** * Get the copyStatusDescription property: Only appears when * x-ms-copy-status is failed or pending. Describes the cause of the last * fatal or non-fatal copy operation failure. This header does not appear * if this blob has never been the destination in a Copy Blob operation, or * if this blob has been modified after a concluded Copy Blob operation * using Set Blob Properties, Put Blob, or Put Block List. * * @return the copyStatusDescription value. 
*/ public String getCopyStatusDescription() { return this.copyStatusDescription; } /** * Set the copyStatusDescription property: Only appears when * x-ms-copy-status is failed or pending. Describes the cause of the last * fatal or non-fatal copy operation failure. This header does not appear * if this blob has never been the destination in a Copy Blob operation, or * if this blob has been modified after a concluded Copy Blob operation * using Set Blob Properties, Put Blob, or Put Block List. * * @param copyStatusDescription the copyStatusDescription value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyStatusDescription(String copyStatusDescription) { this.copyStatusDescription = copyStatusDescription; return this; } /** * Get the copyId property: String identifier for this copy operation. Use * with Get Blob Properties to check the status of this copy operation, or * pass to Abort Copy Blob to abort a pending copy. * * @return the copyId value. */ public String getCopyId() { return this.copyId; } /** * Set the copyId property: String identifier for this copy operation. Use * with Get Blob Properties to check the status of this copy operation, or * pass to Abort Copy Blob to abort a pending copy. * * @param copyId the copyId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyId(String copyId) { this.copyId = copyId; return this; } /** * Get the copyProgress property: Contains the number of bytes copied and * the total bytes in the source in the last attempted Copy Blob operation * where this blob was the destination blob. Can show between 0 and * Content-Length bytes copied. This header does not appear if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @return the copyProgress value. 
*/ public String getCopyProgress() { return this.copyProgress; } /** * Set the copyProgress property: Contains the number of bytes copied and * the total bytes in the source in the last attempted Copy Blob operation * where this blob was the destination blob. Can show between 0 and * Content-Length bytes copied. This header does not appear if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @param copyProgress the copyProgress value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyProgress(String copyProgress) { this.copyProgress = copyProgress; return this; } /** * Get the copySource property: URL up to 2 KB in length that specifies the * source blob or file used in the last attempted Copy Blob operation where * this blob was the destination blob. This header does not appear if this * blob has never been the destination in a Copy Blob operation, or if this * blob has been modified after a concluded Copy Blob operation using Set * Blob Properties, Put Blob, or Put Block List. * * @return the copySource value. */ public String getCopySource() { return this.copySource; } /** * Set the copySource property: URL up to 2 KB in length that specifies the * source blob or file used in the last attempted Copy Blob operation where * this blob was the destination blob. This header does not appear if this * blob has never been the destination in a Copy Blob operation, or if this * blob has been modified after a concluded Copy Blob operation using Set * Blob Properties, Put Blob, or Put Block List. * * @param copySource the copySource value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setCopySource(String copySource) { this.copySource = copySource; return this; } /** * Get the copyStatus property: State of the copy operation identified by * x-ms-copy-id. Possible values include: 'pending', 'success', 'aborted', * 'failed'. * * @return the copyStatus value. */ public CopyStatusType getCopyStatus() { return this.copyStatus; } /** * Set the copyStatus property: State of the copy operation identified by * x-ms-copy-id. Possible values include: 'pending', 'success', 'aborted', * 'failed'. * * @param copyStatus the copyStatus value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyStatus(CopyStatusType copyStatus) { this.copyStatus = copyStatus; return this; } /** * Get the leaseDuration property: When a blob is leased, specifies whether * the lease is of infinite or fixed duration. Possible values include: * 'infinite', 'fixed'. * * @return the leaseDuration value. */ public LeaseDurationType getLeaseDuration() { return this.leaseDuration; } /** * Set the leaseDuration property: When a blob is leased, specifies whether * the lease is of infinite or fixed duration. Possible values include: * 'infinite', 'fixed'. * * @param leaseDuration the leaseDuration value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseDuration(LeaseDurationType leaseDuration) { this.leaseDuration = leaseDuration; return this; } /** * Get the leaseState property: Lease state of the blob. Possible values * include: 'available', 'leased', 'expired', 'breaking', 'broken'. * * @return the leaseState value. */ public LeaseStateType getLeaseState() { return this.leaseState; } /** * Set the leaseState property: Lease state of the blob. Possible values * include: 'available', 'leased', 'expired', 'breaking', 'broken'. * * @param leaseState the leaseState value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setLeaseState(LeaseStateType leaseState) { this.leaseState = leaseState; return this; } /** * Get the leaseStatus property: The current lease status of the blob. * Possible values include: 'locked', 'unlocked'. * * @return the leaseStatus value. */ public LeaseStatusType getLeaseStatus() { return this.leaseStatus; } /** * Set the leaseStatus property: The current lease status of the blob. * Possible values include: 'locked', 'unlocked'. * * @param leaseStatus the leaseStatus value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseStatus(LeaseStatusType leaseStatus) { this.leaseStatus = leaseStatus; return this; } /** * Get the clientRequestId property: If a client request id header is sent * in the request, this header will be present in the response with the * same value. * * @return the clientRequestId value. */ public String getClientRequestId() { return this.clientRequestId; } /** * Set the clientRequestId property: If a client request id header is sent * in the request, this header will be present in the response with the * same value. * * @param clientRequestId the clientRequestId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setClientRequestId(String clientRequestId) { this.clientRequestId = clientRequestId; return this; } /** * Get the requestId property: This header uniquely identifies the request * that was made and can be used for troubleshooting the request. * * @return the requestId value. */ public String getRequestId() { return this.requestId; } /** * Set the requestId property: This header uniquely identifies the request * that was made and can be used for troubleshooting the request. * * @param requestId the requestId value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setRequestId(String requestId) { this.requestId = requestId; return this; } /** * Get the version property: Indicates the version of the Blob service used * to execute the request. This header is returned for requests made * against version 2009-09-19 and above. * * @return the version value. */ public String getVersion() { return this.version; } /** * Set the version property: Indicates the version of the Blob service used * to execute the request. This header is returned for requests made * against version 2009-09-19 and above. * * @param version the version value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setVersion(String version) { this.version = version; return this; } /** * Get the versionId property: A DateTime value returned by the service * that uniquely identifies the blob. The value of this header indicates * the blob version, and may be used in subsequent requests to access this * version of the blob. * * @return the versionId value. */ public String getVersionId() { return this.versionId; } /** * Set the versionId property: A DateTime value returned by the service * that uniquely identifies the blob. The value of this header indicates * the blob version, and may be used in subsequent requests to access this * version of the blob. * * @param versionId the versionId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setVersionId(String versionId) { this.versionId = versionId; return this; } /** * Get the acceptRanges property: Indicates that the service supports * requests for partial blob content. * * @return the acceptRanges value. */ public String getAcceptRanges() { return this.acceptRanges; } /** * Set the acceptRanges property: Indicates that the service supports * requests for partial blob content. * * @param acceptRanges the acceptRanges value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setAcceptRanges(String acceptRanges) { this.acceptRanges = acceptRanges; return this; } /** * Get the dateProperty property: UTC date/time value generated by the * service that indicates the time at which the response was initiated. * * @return the dateProperty value. */ public OffsetDateTime getDateProperty() { return this.dateProperty; } /** * Set the dateProperty property: UTC date/time value generated by the * service that indicates the time at which the response was initiated. * * @param dateProperty the dateProperty value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setDateProperty(OffsetDateTime dateProperty) { this.dateProperty = dateProperty; return this; } /** * Get the blobCommittedBlockCount property: The number of committed blocks * present in the blob. This header is returned only for append blobs. * * @return the blobCommittedBlockCount value. */ public Integer getBlobCommittedBlockCount() { return this.blobCommittedBlockCount; } /** * Set the blobCommittedBlockCount property: The number of committed blocks * present in the blob. This header is returned only for append blobs. * * @param blobCommittedBlockCount the blobCommittedBlockCount value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobCommittedBlockCount(Integer blobCommittedBlockCount) { this.blobCommittedBlockCount = blobCommittedBlockCount; return this; } /** * Get the isServerEncrypted property: The value of this header is set to * true if the blob data and application metadata are completely encrypted * using the specified algorithm. Otherwise, the value is set to false * (when the blob is unencrypted, or if only parts of the blob/application * metadata are encrypted). * * @return the isServerEncrypted value. 
     */
    public Boolean isServerEncrypted() {
        // Boolean getter uses the is-prefix naming convention rather than getIsServerEncrypted.
        return this.isServerEncrypted;
    }

    /**
     * Set the isServerEncrypted property: The value of this header is set to
     * true if the blob data and application metadata are completely encrypted
     * using the specified algorithm. Otherwise, the value is set to false
     * (when the blob is unencrypted, or if only parts of the blob/application
     * metadata are encrypted).
     *
     * @param isServerEncrypted the isServerEncrypted value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setIsServerEncrypted(Boolean isServerEncrypted) {
        this.isServerEncrypted = isServerEncrypted;
        return this;
    }

    /**
     * Get the encryptionKeySha256 property: The SHA-256 hash of the encryption
     * key used to encrypt the blob. This header is only returned when the blob
     * was encrypted with a customer-provided key.
     *
     * @return the encryptionKeySha256 value.
     */
    public String getEncryptionKeySha256() {
        return this.encryptionKeySha256;
    }

    /**
     * Set the encryptionKeySha256 property: The SHA-256 hash of the encryption
     * key used to encrypt the blob. This header is only returned when the blob
     * was encrypted with a customer-provided key.
     *
     * @param encryptionKeySha256 the encryptionKeySha256 value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setEncryptionKeySha256(String encryptionKeySha256) {
        this.encryptionKeySha256 = encryptionKeySha256;
        return this;
    }

    /**
     * Get the encryptionScope property: Returns the name of the encryption
     * scope used to encrypt the blob contents and application metadata. Note
     * that the absence of this header implies use of the default account
     * encryption scope.
     *
     * @return the encryptionScope value.
     */
    public String getEncryptionScope() {
        return this.encryptionScope;
    }

    /**
     * Set the encryptionScope property: Returns the name of the encryption
     * scope used to encrypt the blob contents and application metadata.
Note * that the absence of this header implies use of the default account * encryption scope. * * @param encryptionScope the encryptionScope value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionScope(String encryptionScope) { this.encryptionScope = encryptionScope; return this; } /** * Get the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @return the blobContentMD5 value. */ public byte[] getBlobContentMD5() { return CoreUtils.clone(this.blobContentMD5); } /** * Set the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @param blobContentMD5 the blobContentMD5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobContentMD5(byte[] blobContentMD5) { this.blobContentMD5 = CoreUtils.clone(blobContentMD5); return this; } /** * Get the tagCount property: The number of tags associated with the blob. * * @return the tagCount value. */ public Long getTagCount() { return this.tagCount; } /** * Set the tagCount property: The number of tags associated with the blob. * * @param tagCount the tagCount value to set. * @return the BlobDownloadHeaders object itself. 
     */
    public BlobDownloadHeaders setTagCount(Long tagCount) {
        this.tagCount = tagCount;
        return this;
    }

    /**
     * Get the contentCrc64 property: If the request is to read a specified
     * range and the x-ms-range-get-content-crc64 is set to true, then the
     * request returns a crc64 for the range, as long as the range size is less
     * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp;
     * x-ms-range-get-content-md5 is specified in the same request, it will
     * fail with 400(Bad Request).
     *
     * @return the contentCrc64 value.
     */
    public byte[] getContentCrc64() {
        // Defensive copy so callers cannot mutate this header's internal buffer.
        return CoreUtils.clone(this.contentCrc64);
    }

    /**
     * Set the contentCrc64 property: If the request is to read a specified
     * range and the x-ms-range-get-content-crc64 is set to true, then the
     * request returns a crc64 for the range, as long as the range size is less
     * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp;
     * x-ms-range-get-content-md5 is specified in the same request, it will
     * fail with 400(Bad Request).
     *
     * @param contentCrc64 the contentCrc64 value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setContentCrc64(byte[] contentCrc64) {
        // Defensive copy so later mutation of the caller's array cannot change this header.
        this.contentCrc64 = CoreUtils.clone(contentCrc64);
        return this;
    }

    /**
     * Get the errorCode property: the value of the service's
     * {@code x-ms-error-code} response header, when present.
     *
     * @return the errorCode value.
     */
    public String getErrorCode() {
        return this.errorCode;
    }

    /**
     * Set the errorCode property: the value of the service's
     * {@code x-ms-error-code} response header.
     *
     * @param errorCode the errorCode value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setErrorCode(String errorCode) {
        this.errorCode = errorCode;
        return this;
    }
}
class BlobDownloadHeaders {

    /**
     * Instantiates an empty {@code BlobDownloadHeaders}.
     */
    public BlobDownloadHeaders() {
        // No-arg constructor for callers (and serializers) that populate the headers via setters.
        // Restores the constructor documented by the Javadoc above, which was previously orphaned.
    }

    /**
     * Instantiates a {@code BlobDownloadHeaders} object based on the generated, internal version of the type.
     *
     * @param headers The generated headers type from which to extract values.
     */
    public BlobDownloadHeaders(com.azure.storage.blob.implementation.models.BlobDownloadHeaders headers) {
        /*
        We have these two types because we needed to update this interface in a way that could not be generated
        (getObjectReplicationSourcePolicies), so we switched to generating BlobDownloadHeaders into implementation and
        wrapping it. Because it's headers type, we couldn't change the name of the generated type.
         */
        this.lastModified = headers.getLastModified();
        this.metadata = headers.getMetadata();
        this.eTag = headers.getETag();
        this.contentLength = headers.getContentLength();
        this.contentType = headers.getContentType();
        this.contentRange = headers.getContentRange();
        this.contentEncoding = headers.getContentEncoding();
        this.contentLanguage = headers.getContentLanguage();
        this.contentMd5 = headers.getContentMd5();
        this.contentDisposition = headers.getContentDisposition();
        this.cacheControl = headers.getCacheControl();
        this.blobSequenceNumber = headers.getBlobSequenceNumber();
        this.blobType = headers.getBlobType();
        this.leaseStatus = headers.getLeaseStatus();
        this.leaseState = headers.getLeaseState();
        this.leaseDuration = headers.getLeaseDuration();
        this.copyId = headers.getCopyId();
        this.copyStatus = headers.getCopyStatus();
        this.copySource = headers.getCopySource();
        this.copyProgress = headers.getCopyProgress();
        this.copyCompletionTime = headers.getCopyCompletionTime();
        this.copyStatusDescription = headers.getCopyStatusDescription();
        this.isServerEncrypted = headers.isServerEncrypted();
        this.clientRequestId = headers.getClientRequestId();
        this.requestId = headers.getRequestId();
        this.version = headers.getVersion();
        this.versionId = headers.getVersionId();
        this.acceptRanges = headers.getAcceptRanges();
        this.dateProperty = headers.getDateProperty();
        this.blobCommittedBlockCount = headers.getBlobCommittedBlockCount();
        this.encryptionKeySha256 = headers.getEncryptionKeySha256();
        this.encryptionScope = headers.getEncryptionScope();
        this.blobContentMD5 = headers.getBlobContentMD5();
        this.contentCrc64 = headers.getContentCrc64();
        this.errorCode = headers.getErrorCode();
        this.tagCount = headers.getTagCount();

        // x-ms-or-* headers carry either a single destination "policy-id" entry, or a set of
        // per-rule status entries on the replication source blob.
        Map<String, String> objectReplicationStatus = headers.getObjectReplicationRules();
        Map<String, List<ObjectReplicationRule>> internalSourcePolicies = new HashMap<>();
        objectReplicationStatus = objectReplicationStatus == null ? new HashMap<>() : objectReplicationStatus;
        this.objectReplicationDestinationPolicyId = objectReplicationStatus.getOrDefault("policy-id", null);
        if (this.objectReplicationDestinationPolicyId == null) {
            for (Map.Entry<String, String> entry : objectReplicationStatus.entrySet()) {
                // Keys are assumed to be "<policyId>_<ruleId>" on source blobs -- a key without '_'
                // would throw ArrayIndexOutOfBoundsException here; TODO confirm the service cannot emit one.
                String[] split = entry.getKey().split("_");
                String policyId = split[0];
                String ruleId = split[1];
                ObjectReplicationRule rule =
                    new ObjectReplicationRule(ruleId, ObjectReplicationStatus.fromString(entry.getValue()));
                // Group rules by policy id; computeIfAbsent replaces the original containsKey/put dance.
                internalSourcePolicies.computeIfAbsent(policyId, key -> new ArrayList<>()).add(rule);
            }
        }
        this.objectReplicationSourcePolicies = new ArrayList<>();
        for (Map.Entry<String, List<ObjectReplicationRule>> entry : internalSourcePolicies.entrySet()) {
            this.objectReplicationSourcePolicies.add(new ObjectReplicationPolicy(entry.getKey(), entry.getValue()));
        }
    }

    /*
     * Returns the date and time the container was last modified. Any operation
     * that modifies the blob, including an update of the blob's metadata or
     * properties, changes the last-modified time of the blob.
     */
    @JsonProperty(value = "Last-Modified")
    private OffsetDateTime lastModified;

    /*
     * The metadata property.
     */
    @HeaderCollection("x-ms-meta-")
    private Map<String, String> metadata;

    /*
     * Optional.
Only valid when Object Replication is enabled for the storage * container and on the destination blob of the replication. */ @JsonProperty(value = "x-ms-or-policy-id") private String objectReplicationDestinationPolicyId; /* * The objectReplicationRuleStatus property. */ @HeaderCollection("x-ms-or-") private List<ObjectReplicationPolicy> objectReplicationSourcePolicies; /* * The number of bytes present in the response body. */ @JsonProperty(value = "Content-Length") private Long contentLength; /* * The media type of the body of the response. For Download Blob this is * 'application/octet-stream' */ @JsonProperty(value = "Content-Type") private String contentType; /* * Indicates the range of bytes returned in the event that the client * requested a subset of the blob by setting the 'Range' request header. */ @JsonProperty(value = "Content-Range") private String contentRange; /* * The ETag contains a value that you can use to perform operations * conditionally. If the request version is 2011-08-18 or newer, the ETag * value will be in quotes. */ @JsonProperty(value = "ETag") private String eTag; /* * If the blob has an MD5 hash and this operation is to read the full blob, * this response header is returned so that the client can check for * message content integrity. */ @JsonProperty(value = "Content-MD5") private byte[] contentMd5; /* * This header returns the value that was specified for the * Content-Encoding request header */ @JsonProperty(value = "Content-Encoding") private String contentEncoding; /* * This header is returned if it was previously specified for the blob. */ @JsonProperty(value = "Cache-Control") private String cacheControl; /* * This header returns the value that was specified for the * 'x-ms-blob-content-disposition' header. The Content-Disposition response * header field conveys additional information about how to process the * response payload, and also can be used to attach additional metadata. 
* For example, if set to attachment, it indicates that the user-agent * should not display the response, but instead show a Save As dialog with * a filename other than the blob name specified. */ @JsonProperty(value = "Content-Disposition") private String contentDisposition; /* * This header returns the value that was specified for the * Content-Language request header. */ @JsonProperty(value = "Content-Language") private String contentLanguage; /* * The current sequence number for a page blob. This header is not returned * for block blobs or append blobs */ @JsonProperty(value = "x-ms-blob-sequence-number") private Long blobSequenceNumber; /* * The blob's type. Possible values include: 'BlockBlob', 'PageBlob', * 'AppendBlob' */ @JsonProperty(value = "x-ms-blob-type") private BlobType blobType; /* * Conclusion time of the last attempted Copy Blob operation where this * blob was the destination blob. This value can specify the time of a * completed, aborted, or failed copy attempt. This header does not appear * if a copy is pending, if this blob has never been the destination in a * Copy Blob operation, or if this blob has been modified after a concluded * Copy Blob operation using Set Blob Properties, Put Blob, or Put Block * List. */ @JsonProperty(value = "x-ms-copy-completion-time") private OffsetDateTime copyCompletionTime; /* * Only appears when x-ms-copy-status is failed or pending. Describes the * cause of the last fatal or non-fatal copy operation failure. This header * does not appear if this blob has never been the destination in a Copy * Blob operation, or if this blob has been modified after a concluded Copy * Blob operation using Set Blob Properties, Put Blob, or Put Block List */ @JsonProperty(value = "x-ms-copy-status-description") private String copyStatusDescription; /* * String identifier for this copy operation. Use with Get Blob Properties * to check the status of this copy operation, or pass to Abort Copy Blob * to abort a pending copy. 
*/ @JsonProperty(value = "x-ms-copy-id") private String copyId; /* * Contains the number of bytes copied and the total bytes in the source in * the last attempted Copy Blob operation where this blob was the * destination blob. Can show between 0 and Content-Length bytes copied. * This header does not appear if this blob has never been the destination * in a Copy Blob operation, or if this blob has been modified after a * concluded Copy Blob operation using Set Blob Properties, Put Blob, or * Put Block List */ @JsonProperty(value = "x-ms-copy-progress") private String copyProgress; /* * URL up to 2 KB in length that specifies the source blob or file used in * the last attempted Copy Blob operation where this blob was the * destination blob. This header does not appear if this blob has never * been the destination in a Copy Blob operation, or if this blob has been * modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. */ @JsonProperty(value = "x-ms-copy-source") private String copySource; /* * State of the copy operation identified by x-ms-copy-id. Possible values * include: 'pending', 'success', 'aborted', 'failed' */ @JsonProperty(value = "x-ms-copy-status") private CopyStatusType copyStatus; /* * When a blob is leased, specifies whether the lease is of infinite or * fixed duration. Possible values include: 'infinite', 'fixed' */ @JsonProperty(value = "x-ms-lease-duration") private LeaseDurationType leaseDuration; /* * Lease state of the blob. Possible values include: 'available', 'leased', * 'expired', 'breaking', 'broken' */ @JsonProperty(value = "x-ms-lease-state") private LeaseStateType leaseState; /* * The current lease status of the blob. Possible values include: 'locked', * 'unlocked' */ @JsonProperty(value = "x-ms-lease-status") private LeaseStatusType leaseStatus; /* * If a client request id header is sent in the request, this header will * be present in the response with the same value. 
*/ @JsonProperty(value = "x-ms-client-request-id") private String clientRequestId; /* * This header uniquely identifies the request that was made and can be * used for troubleshooting the request. */ @JsonProperty(value = "x-ms-request-id") private String requestId; /* * Indicates the version of the Blob service used to execute the request. * This header is returned for requests made against version 2009-09-19 and * above. */ @JsonProperty(value = "x-ms-version") private String version; /* * A DateTime value returned by the service that uniquely identifies the * blob. The value of this header indicates the blob version, and may be * used in subsequent requests to access this version of the blob. */ @JsonProperty(value = "x-ms-version-id") private String versionId; /* * Indicates that the service supports requests for partial blob content. */ @JsonProperty(value = "Accept-Ranges") private String acceptRanges; /* * UTC date/time value generated by the service that indicates the time at * which the response was initiated */ @JsonProperty(value = "Date") private OffsetDateTime dateProperty; /* * The number of committed blocks present in the blob. This header is * returned only for append blobs. */ @JsonProperty(value = "x-ms-blob-committed-block-count") private Integer blobCommittedBlockCount; /* * The value of this header is set to true if the blob data and application * metadata are completely encrypted using the specified algorithm. * Otherwise, the value is set to false (when the blob is unencrypted, or * if only parts of the blob/application metadata are encrypted). */ @JsonProperty(value = "x-ms-server-encrypted") private Boolean isServerEncrypted; /* * The SHA-256 hash of the encryption key used to encrypt the blob. This * header is only returned when the blob was encrypted with a * customer-provided key. 
*/ @JsonProperty(value = "x-ms-encryption-key-sha256") private String encryptionKeySha256; /* * Returns the name of the encryption scope used to encrypt the blob * contents and application metadata. Note that the absence of this header * implies use of the default account encryption scope. */ @JsonProperty(value = "x-ms-encryption-scope") private String encryptionScope; /* * If the blob has a MD5 hash, and if request contains range header (Range * or x-ms-range), this response header is returned with the value of the * whole blob's MD5 value. This value may or may not be equal to the value * returned in Content-MD5 header, with the latter calculated from the * requested range */ @JsonProperty(value = "x-ms-blob-content-md5") private byte[] blobContentMD5; /* * The number of tags associated with the blob */ @JsonProperty(value = "x-ms-tag-count") private Long tagCount; /* * If the request is to read a specified range and the * x-ms-range-get-content-crc64 is set to true, then the request returns a * crc64 for the range, as long as the range size is less than or equal to * 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 * is specified in the same request, it will fail with 400(Bad Request) */ @JsonProperty(value = "x-ms-content-crc64") private byte[] contentCrc64; /* * The errorCode property. */ @JsonProperty(value = "x-ms-error-code") private String errorCode; /** * Get the lastModified property: Returns the date and time the container * was last modified. Any operation that modifies the blob, including an * update of the blob's metadata or properties, changes the last-modified * time of the blob. * * @return the lastModified value. */ public OffsetDateTime getLastModified() { return this.lastModified; } /** * Set the lastModified property: Returns the date and time the container * was last modified. Any operation that modifies the blob, including an * update of the blob's metadata or properties, changes the last-modified * time of the blob. 
* * @param lastModified the lastModified value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLastModified(OffsetDateTime lastModified) { this.lastModified = lastModified; return this; } /** * Get the metadata property: The metadata property. * * @return the metadata value. */ public Map<String, String> getMetadata() { return this.metadata; } /** * Set the metadata property: The metadata property. * * @param metadata the metadata value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setMetadata(Map<String, String> metadata) { this.metadata = metadata; return this; } /** * Get the objectReplicationDestinationPolicyId property: Optional. Only * valid when Object Replication is enabled for the storage container and * on the destination blob of the replication. * * @return the objectReplicationDestinationPolicyId value. */ public String getObjectReplicationDestinationPolicyId() { return this.objectReplicationDestinationPolicyId; } /** * Set the objectReplicationDestinationPolicyId property: Optional. Only * valid when Object Replication is enabled for the storage container and * on the destination blob of the replication. * * @param objectReplicationDestinationPolicyId the * objectReplicationDestinationPolicyId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationDestinationPolicyId(String objectReplicationDestinationPolicyId) { this.objectReplicationDestinationPolicyId = objectReplicationDestinationPolicyId; return this; } /** * Get the objectReplicationSourcePolicies property: The * objectReplicationSourcePolicies property. * * @return the objectReplicationSourcePolicies value. 
*/ public List<ObjectReplicationPolicy> getObjectReplicationSourcePolicies() { return Collections.unmodifiableList(this.objectReplicationSourcePolicies); } /** * Set the objectReplicationSourcePolicies property: The * objectReplicationSourcePolicies property. * * @param objectReplicationSourcePolicies the objectReplicationSourcePolicies value * to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationSourcePolicies( List<ObjectReplicationPolicy> objectReplicationSourcePolicies) { this.objectReplicationSourcePolicies = objectReplicationSourcePolicies; return this; } /** * Get the contentLength property: The number of bytes present in the * response body. * * @return the contentLength value. */ public Long getContentLength() { return this.contentLength; } /** * Set the contentLength property: The number of bytes present in the * response body. * * @param contentLength the contentLength value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLength(Long contentLength) { this.contentLength = contentLength; return this; } /** * Get the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @return the contentType value. */ public String getContentType() { return this.contentType; } /** * Set the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @param contentType the contentType value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentType(String contentType) { this.contentType = contentType; return this; } /** * Get the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @return the contentRange value. 
*/ public String getContentRange() { return this.contentRange; } /** * Set the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @param contentRange the contentRange value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentRange(String contentRange) { this.contentRange = contentRange; return this; } /** * Get the eTag property: The ETag contains a value that you can use to * perform operations conditionally. If the request version is 2011-08-18 * or newer, the ETag value will be in quotes. * * @return the eTag value. */ public String getETag() { return this.eTag; } /** * Set the eTag property: The ETag contains a value that you can use to * perform operations conditionally. If the request version is 2011-08-18 * or newer, the ETag value will be in quotes. * * @param eTag the eTag value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setETag(String eTag) { this.eTag = eTag; return this; } /** * Get the contentMd5 property: If the blob has an MD5 hash and this * operation is to read the full blob, this response header is returned so * that the client can check for message content integrity. * * @return the contentMd5 value. */ public byte[] getContentMd5() { return CoreUtils.clone(this.contentMd5); } /** * Set the contentMd5 property: If the blob has an MD5 hash and this * operation is to read the full blob, this response header is returned so * that the client can check for message content integrity. * * @param contentMd5 the contentMd5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentMd5(byte[] contentMd5) { this.contentMd5 = CoreUtils.clone(contentMd5); return this; } /** * Get the contentEncoding property: This header returns the value that was * specified for the Content-Encoding request header. 
* * @return the contentEncoding value. */ public String getContentEncoding() { return this.contentEncoding; } /** * Set the contentEncoding property: This header returns the value that was * specified for the Content-Encoding request header. * * @param contentEncoding the contentEncoding value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentEncoding(String contentEncoding) { this.contentEncoding = contentEncoding; return this; } /** * Get the cacheControl property: This header is returned if it was * previously specified for the blob. * * @return the cacheControl value. */ public String getCacheControl() { return this.cacheControl; } /** * Set the cacheControl property: This header is returned if it was * previously specified for the blob. * * @param cacheControl the cacheControl value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCacheControl(String cacheControl) { this.cacheControl = cacheControl; return this; } /** * Get the contentDisposition property: This header returns the value that * was specified for the 'x-ms-blob-content-disposition' header. The * Content-Disposition response header field conveys additional information * about how to process the response payload, and also can be used to * attach additional metadata. For example, if set to attachment, it * indicates that the user-agent should not display the response, but * instead show a Save As dialog with a filename other than the blob name * specified. * * @return the contentDisposition value. */ public String getContentDisposition() { return this.contentDisposition; } /** * Set the contentDisposition property: This header returns the value that * was specified for the 'x-ms-blob-content-disposition' header. The * Content-Disposition response header field conveys additional information * about how to process the response payload, and also can be used to * attach additional metadata. 
For example, if set to attachment, it * indicates that the user-agent should not display the response, but * instead show a Save As dialog with a filename other than the blob name * specified. * * @param contentDisposition the contentDisposition value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentDisposition(String contentDisposition) { this.contentDisposition = contentDisposition; return this; } /** * Get the contentLanguage property: This header returns the value that was * specified for the Content-Language request header. * * @return the contentLanguage value. */ public String getContentLanguage() { return this.contentLanguage; } /** * Set the contentLanguage property: This header returns the value that was * specified for the Content-Language request header. * * @param contentLanguage the contentLanguage value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLanguage(String contentLanguage) { this.contentLanguage = contentLanguage; return this; } /** * Get the blobSequenceNumber property: The current sequence number for a * page blob. This header is not returned for block blobs or append blobs. * * @return the blobSequenceNumber value. */ public Long getBlobSequenceNumber() { return this.blobSequenceNumber; } /** * Set the blobSequenceNumber property: The current sequence number for a * page blob. This header is not returned for block blobs or append blobs. * * @param blobSequenceNumber the blobSequenceNumber value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobSequenceNumber(Long blobSequenceNumber) { this.blobSequenceNumber = blobSequenceNumber; return this; } /** * Get the blobType property: The blob's type. Possible values include: * 'BlockBlob', 'PageBlob', 'AppendBlob'. * * @return the blobType value. */ public BlobType getBlobType() { return this.blobType; } /** * Set the blobType property: The blob's type. 
Possible values include: * 'BlockBlob', 'PageBlob', 'AppendBlob'. * * @param blobType the blobType value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobType(BlobType blobType) { this.blobType = blobType; return this; } /** * Get the copyCompletionTime property: Conclusion time of the last * attempted Copy Blob operation where this blob was the destination blob. * This value can specify the time of a completed, aborted, or failed copy * attempt. This header does not appear if a copy is pending, if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @return the copyCompletionTime value. */ public OffsetDateTime getCopyCompletionTime() { return this.copyCompletionTime; } /** * Set the copyCompletionTime property: Conclusion time of the last * attempted Copy Blob operation where this blob was the destination blob. * This value can specify the time of a completed, aborted, or failed copy * attempt. This header does not appear if a copy is pending, if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @param copyCompletionTime the copyCompletionTime value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyCompletionTime(OffsetDateTime copyCompletionTime) { this.copyCompletionTime = copyCompletionTime; return this; } /** * Get the copyStatusDescription property: Only appears when * x-ms-copy-status is failed or pending. Describes the cause of the last * fatal or non-fatal copy operation failure. 
This header does not appear * if this blob has never been the destination in a Copy Blob operation, or * if this blob has been modified after a concluded Copy Blob operation * using Set Blob Properties, Put Blob, or Put Block List. * * @return the copyStatusDescription value. */ public String getCopyStatusDescription() { return this.copyStatusDescription; } /** * Set the copyStatusDescription property: Only appears when * x-ms-copy-status is failed or pending. Describes the cause of the last * fatal or non-fatal copy operation failure. This header does not appear * if this blob has never been the destination in a Copy Blob operation, or * if this blob has been modified after a concluded Copy Blob operation * using Set Blob Properties, Put Blob, or Put Block List. * * @param copyStatusDescription the copyStatusDescription value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyStatusDescription(String copyStatusDescription) { this.copyStatusDescription = copyStatusDescription; return this; } /** * Get the copyId property: String identifier for this copy operation. Use * with Get Blob Properties to check the status of this copy operation, or * pass to Abort Copy Blob to abort a pending copy. * * @return the copyId value. */ public String getCopyId() { return this.copyId; } /** * Set the copyId property: String identifier for this copy operation. Use * with Get Blob Properties to check the status of this copy operation, or * pass to Abort Copy Blob to abort a pending copy. * * @param copyId the copyId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyId(String copyId) { this.copyId = copyId; return this; } /** * Get the copyProgress property: Contains the number of bytes copied and * the total bytes in the source in the last attempted Copy Blob operation * where this blob was the destination blob. Can show between 0 and * Content-Length bytes copied. 
This header does not appear if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @return the copyProgress value. */ public String getCopyProgress() { return this.copyProgress; } /** * Set the copyProgress property: Contains the number of bytes copied and * the total bytes in the source in the last attempted Copy Blob operation * where this blob was the destination blob. Can show between 0 and * Content-Length bytes copied. This header does not appear if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @param copyProgress the copyProgress value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyProgress(String copyProgress) { this.copyProgress = copyProgress; return this; } /** * Get the copySource property: URL up to 2 KB in length that specifies the * source blob or file used in the last attempted Copy Blob operation where * this blob was the destination blob. This header does not appear if this * blob has never been the destination in a Copy Blob operation, or if this * blob has been modified after a concluded Copy Blob operation using Set * Blob Properties, Put Blob, or Put Block List. * * @return the copySource value. */ public String getCopySource() { return this.copySource; } /** * Set the copySource property: URL up to 2 KB in length that specifies the * source blob or file used in the last attempted Copy Blob operation where * this blob was the destination blob. This header does not appear if this * blob has never been the destination in a Copy Blob operation, or if this * blob has been modified after a concluded Copy Blob operation using Set * Blob Properties, Put Blob, or Put Block List. 
* * @param copySource the copySource value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopySource(String copySource) { this.copySource = copySource; return this; } /** * Get the copyStatus property: State of the copy operation identified by * x-ms-copy-id. Possible values include: 'pending', 'success', 'aborted', * 'failed'. * * @return the copyStatus value. */ public CopyStatusType getCopyStatus() { return this.copyStatus; } /** * Set the copyStatus property: State of the copy operation identified by * x-ms-copy-id. Possible values include: 'pending', 'success', 'aborted', * 'failed'. * * @param copyStatus the copyStatus value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyStatus(CopyStatusType copyStatus) { this.copyStatus = copyStatus; return this; } /** * Get the leaseDuration property: When a blob is leased, specifies whether * the lease is of infinite or fixed duration. Possible values include: * 'infinite', 'fixed'. * * @return the leaseDuration value. */ public LeaseDurationType getLeaseDuration() { return this.leaseDuration; } /** * Set the leaseDuration property: When a blob is leased, specifies whether * the lease is of infinite or fixed duration. Possible values include: * 'infinite', 'fixed'. * * @param leaseDuration the leaseDuration value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseDuration(LeaseDurationType leaseDuration) { this.leaseDuration = leaseDuration; return this; } /** * Get the leaseState property: Lease state of the blob. Possible values * include: 'available', 'leased', 'expired', 'breaking', 'broken'. * * @return the leaseState value. */ public LeaseStateType getLeaseState() { return this.leaseState; } /** * Set the leaseState property: Lease state of the blob. Possible values * include: 'available', 'leased', 'expired', 'breaking', 'broken'. * * @param leaseState the leaseState value to set. 
* @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseState(LeaseStateType leaseState) { this.leaseState = leaseState; return this; } /** * Get the leaseStatus property: The current lease status of the blob. * Possible values include: 'locked', 'unlocked'. * * @return the leaseStatus value. */ public LeaseStatusType getLeaseStatus() { return this.leaseStatus; } /** * Set the leaseStatus property: The current lease status of the blob. * Possible values include: 'locked', 'unlocked'. * * @param leaseStatus the leaseStatus value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseStatus(LeaseStatusType leaseStatus) { this.leaseStatus = leaseStatus; return this; } /** * Get the clientRequestId property: If a client request id header is sent * in the request, this header will be present in the response with the * same value. * * @return the clientRequestId value. */ public String getClientRequestId() { return this.clientRequestId; } /** * Set the clientRequestId property: If a client request id header is sent * in the request, this header will be present in the response with the * same value. * * @param clientRequestId the clientRequestId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setClientRequestId(String clientRequestId) { this.clientRequestId = clientRequestId; return this; } /** * Get the requestId property: This header uniquely identifies the request * that was made and can be used for troubleshooting the request. * * @return the requestId value. */ public String getRequestId() { return this.requestId; } /** * Set the requestId property: This header uniquely identifies the request * that was made and can be used for troubleshooting the request. * * @param requestId the requestId value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setRequestId(String requestId) { this.requestId = requestId; return this; } /** * Get the version property: Indicates the version of the Blob service used * to execute the request. This header is returned for requests made * against version 2009-09-19 and above. * * @return the version value. */ public String getVersion() { return this.version; } /** * Set the version property: Indicates the version of the Blob service used * to execute the request. This header is returned for requests made * against version 2009-09-19 and above. * * @param version the version value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setVersion(String version) { this.version = version; return this; } /** * Get the versionId property: A DateTime value returned by the service * that uniquely identifies the blob. The value of this header indicates * the blob version, and may be used in subsequent requests to access this * version of the blob. * * @return the versionId value. */ public String getVersionId() { return this.versionId; } /** * Set the versionId property: A DateTime value returned by the service * that uniquely identifies the blob. The value of this header indicates * the blob version, and may be used in subsequent requests to access this * version of the blob. * * @param versionId the versionId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setVersionId(String versionId) { this.versionId = versionId; return this; } /** * Get the acceptRanges property: Indicates that the service supports * requests for partial blob content. * * @return the acceptRanges value. */ public String getAcceptRanges() { return this.acceptRanges; } /** * Set the acceptRanges property: Indicates that the service supports * requests for partial blob content. * * @param acceptRanges the acceptRanges value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setAcceptRanges(String acceptRanges) { this.acceptRanges = acceptRanges; return this; } /** * Get the dateProperty property: UTC date/time value generated by the * service that indicates the time at which the response was initiated. * * @return the dateProperty value. */ public OffsetDateTime getDateProperty() { return this.dateProperty; } /** * Set the dateProperty property: UTC date/time value generated by the * service that indicates the time at which the response was initiated. * * @param dateProperty the dateProperty value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setDateProperty(OffsetDateTime dateProperty) { this.dateProperty = dateProperty; return this; } /** * Get the blobCommittedBlockCount property: The number of committed blocks * present in the blob. This header is returned only for append blobs. * * @return the blobCommittedBlockCount value. */ public Integer getBlobCommittedBlockCount() { return this.blobCommittedBlockCount; } /** * Set the blobCommittedBlockCount property: The number of committed blocks * present in the blob. This header is returned only for append blobs. * * @param blobCommittedBlockCount the blobCommittedBlockCount value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobCommittedBlockCount(Integer blobCommittedBlockCount) { this.blobCommittedBlockCount = blobCommittedBlockCount; return this; } /** * Get the isServerEncrypted property: The value of this header is set to * true if the blob data and application metadata are completely encrypted * using the specified algorithm. Otherwise, the value is set to false * (when the blob is unencrypted, or if only parts of the blob/application * metadata are encrypted). * * @return the isServerEncrypted value. 
*/ public Boolean isServerEncrypted() { return this.isServerEncrypted; } /** * Set the isServerEncrypted property: The value of this header is set to * true if the blob data and application metadata are completely encrypted * using the specified algorithm. Otherwise, the value is set to false * (when the blob is unencrypted, or if only parts of the blob/application * metadata are encrypted). * * @param isServerEncrypted the isServerEncrypted value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setIsServerEncrypted(Boolean isServerEncrypted) { this.isServerEncrypted = isServerEncrypted; return this; } /** * Get the encryptionKeySha256 property: The SHA-256 hash of the encryption * key used to encrypt the blob. This header is only returned when the blob * was encrypted with a customer-provided key. * * @return the encryptionKeySha256 value. */ public String getEncryptionKeySha256() { return this.encryptionKeySha256; } /** * Set the encryptionKeySha256 property: The SHA-256 hash of the encryption * key used to encrypt the blob. This header is only returned when the blob * was encrypted with a customer-provided key. * * @param encryptionKeySha256 the encryptionKeySha256 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionKeySha256(String encryptionKeySha256) { this.encryptionKeySha256 = encryptionKeySha256; return this; } /** * Get the encryptionScope property: Returns the name of the encryption * scope used to encrypt the blob contents and application metadata. Note * that the absence of this header implies use of the default account * encryption scope. * * @return the encryptionScope value. */ public String getEncryptionScope() { return this.encryptionScope; } /** * Set the encryptionScope property: Returns the name of the encryption * scope used to encrypt the blob contents and application metadata. 
Note * that the absence of this header implies use of the default account * encryption scope. * * @param encryptionScope the encryptionScope value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionScope(String encryptionScope) { this.encryptionScope = encryptionScope; return this; } /** * Get the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @return the blobContentMD5 value. */ public byte[] getBlobContentMD5() { return CoreUtils.clone(this.blobContentMD5); } /** * Set the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @param blobContentMD5 the blobContentMD5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobContentMD5(byte[] blobContentMD5) { this.blobContentMD5 = CoreUtils.clone(blobContentMD5); return this; } /** * Get the tagCount property: The number of tags associated with the blob. * * @return the tagCount value. */ public Long getTagCount() { return this.tagCount; } /** * Set the tagCount property: The number of tags associated with the blob. * * @param tagCount the tagCount value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setTagCount(Long tagCount) { this.tagCount = tagCount; return this; } /** * Get the contentCrc64 property: If the request is to read a specified * range and the x-ms-range-get-content-crc64 is set to true, then the * request returns a crc64 for the range, as long as the range size is less * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp; * x-ms-range-get-content-md5 is specified in the same request, it will * fail with 400(Bad Request). * * @return the contentCrc64 value. */ public byte[] getContentCrc64() { return CoreUtils.clone(this.contentCrc64); } /** * Set the contentCrc64 property: If the request is to read a specified * range and the x-ms-range-get-content-crc64 is set to true, then the * request returns a crc64 for the range, as long as the range size is less * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp; * x-ms-range-get-content-md5 is specified in the same request, it will * fail with 400(Bad Request). * * @param contentCrc64 the contentCrc64 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentCrc64(byte[] contentCrc64) { this.contentCrc64 = CoreUtils.clone(contentCrc64); return this; } /** * Get the errorCode property: The errorCode property. * * @return the errorCode value. */ public String getErrorCode() { return this.errorCode; } /** * Set the errorCode property: The errorCode property. * * @param errorCode the errorCode value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setErrorCode(String errorCode) { this.errorCode = errorCode; return this; } }
changed to ORSourcePOlicies
public BlobDownloadHeaders() { objectReplicationPolicies = null; }
objectReplicationPolicies = null;
public BlobDownloadHeaders() { objectReplicationSourcePolicies = null; }
class BlobDownloadHeaders { /** * Instantiates an empty {@code BlobDownloadHeaders}. */ /** * Instantiates a {@code BlobDownloadHeaders} object based on the generated, internal version of the type. * @param headers The generated headers type from which to extract values. */ public BlobDownloadHeaders(com.azure.storage.blob.implementation.models.BlobDownloadHeaders headers) { /* We have these two types because we needed to update this interface in a way that could not be generated (getObjectReplicationSourcePolicies), so we switched to generating BlobDownloadHeaders into implementation and wrapping it. Because it's headers type, we couldn't change the name of the generated type. */ this.lastModified = headers.getLastModified(); this.metadata = headers.getMetadata(); this.eTag = headers.getETag(); this.contentLength = headers.getContentLength(); this.contentType = headers.getContentType(); this.contentRange = headers.getContentRange(); this.contentEncoding = headers.getContentEncoding(); this.contentLanguage = headers.getContentLanguage(); this.contentMd5 = headers.getContentMd5(); this.contentDisposition = headers.getContentDisposition(); this.cacheControl = headers.getCacheControl(); this.blobSequenceNumber = headers.getBlobSequenceNumber(); this.blobType = headers.getBlobType(); this.leaseStatus = headers.getLeaseStatus(); this.leaseState = headers.getLeaseState(); this.leaseDuration = headers.getLeaseDuration(); this.copyId = headers.getCopyId(); this.copyStatus = headers.getCopyStatus(); this.copySource = headers.getCopySource(); this.copyProgress = headers.getCopyProgress(); this.copyCompletionTime = headers.getCopyCompletionTime(); this.copyStatusDescription = headers.getCopyStatusDescription(); this.isServerEncrypted = headers.isServerEncrypted(); this.clientRequestId = headers.getClientRequestId(); this.requestId = headers.getRequestId(); this.version = headers.getVersion(); this.versionId = headers.getVersionId(); this.acceptRanges = 
headers.getAcceptRanges(); this.dateProperty = headers.getDateProperty(); this.blobCommittedBlockCount = headers.getBlobCommittedBlockCount(); this.encryptionKeySha256 = headers.getEncryptionKeySha256(); this.encryptionScope = headers.getEncryptionScope(); this.blobContentMD5 = headers.getBlobContentMD5(); this.contentCrc64 = headers.getContentCrc64(); this.errorCode = headers.getErrorCode(); this.tagCount = headers.getTagCount(); Map<String, String> objectReplicationStatus = headers.getObjectReplicationRules(); this.objectReplicationPolicies = new ArrayList<>(); objectReplicationStatus = objectReplicationStatus == null ? new HashMap<>() : objectReplicationStatus; this.objectReplicationDestinationPolicyId = objectReplicationStatus.getOrDefault("policy-id", null); if (this.objectReplicationDestinationPolicyId == null) { for (Map.Entry<String, String> entry : objectReplicationStatus.entrySet()) { String[] split = entry.getKey().split("_"); String policyId = split[0]; String ruleId = split[1]; ObjectReplicationRule rule = new ObjectReplicationRule(ruleId, ObjectReplicationStatus.fromString(entry.getValue())); int index = ObjectReplicationPolicy.getIndexOfObjectReplicationPolicy(policyId, this.objectReplicationPolicies); if (index == -1) { ObjectReplicationPolicy policy = new ObjectReplicationPolicy(policyId); policy.putRule(rule); this.objectReplicationPolicies.add(policy); } else { ObjectReplicationPolicy policy = objectReplicationPolicies.get(index); policy.putRule(rule); } } } } /* * Returns the date and time the container was last modified. Any operation * that modifies the blob, including an update of the blob's metadata or * properties, changes the last-modified time of the blob. */ @JsonProperty(value = "Last-Modified") private OffsetDateTime lastModified; /* * The metadata property. */ @HeaderCollection("x-ms-meta-") private Map<String, String> metadata; /* * Optional. 
Only valid when Object Replication is enabled for the storage * container and on the destination blob of the replication. */ @JsonProperty(value = "x-ms-or-policy-id") private String objectReplicationDestinationPolicyId; /* * The objectReplicationRuleStatus property. */ @HeaderCollection("x-ms-or-") private List<ObjectReplicationPolicy> objectReplicationPolicies; /* * The number of bytes present in the response body. */ @JsonProperty(value = "Content-Length") private Long contentLength; /* * The media type of the body of the response. For Download Blob this is * 'application/octet-stream' */ @JsonProperty(value = "Content-Type") private String contentType; /* * Indicates the range of bytes returned in the event that the client * requested a subset of the blob by setting the 'Range' request header. */ @JsonProperty(value = "Content-Range") private String contentRange; /* * The ETag contains a value that you can use to perform operations * conditionally. If the request version is 2011-08-18 or newer, the ETag * value will be in quotes. */ @JsonProperty(value = "ETag") private String eTag; /* * If the blob has an MD5 hash and this operation is to read the full blob, * this response header is returned so that the client can check for * message content integrity. */ @JsonProperty(value = "Content-MD5") private byte[] contentMd5; /* * This header returns the value that was specified for the * Content-Encoding request header */ @JsonProperty(value = "Content-Encoding") private String contentEncoding; /* * This header is returned if it was previously specified for the blob. */ @JsonProperty(value = "Cache-Control") private String cacheControl; /* * This header returns the value that was specified for the * 'x-ms-blob-content-disposition' header. The Content-Disposition response * header field conveys additional information about how to process the * response payload, and also can be used to attach additional metadata. 
* For example, if set to attachment, it indicates that the user-agent * should not display the response, but instead show a Save As dialog with * a filename other than the blob name specified. */ @JsonProperty(value = "Content-Disposition") private String contentDisposition; /* * This header returns the value that was specified for the * Content-Language request header. */ @JsonProperty(value = "Content-Language") private String contentLanguage; /* * The current sequence number for a page blob. This header is not returned * for block blobs or append blobs */ @JsonProperty(value = "x-ms-blob-sequence-number") private Long blobSequenceNumber; /* * The blob's type. Possible values include: 'BlockBlob', 'PageBlob', * 'AppendBlob' */ @JsonProperty(value = "x-ms-blob-type") private BlobType blobType; /* * Conclusion time of the last attempted Copy Blob operation where this * blob was the destination blob. This value can specify the time of a * completed, aborted, or failed copy attempt. This header does not appear * if a copy is pending, if this blob has never been the destination in a * Copy Blob operation, or if this blob has been modified after a concluded * Copy Blob operation using Set Blob Properties, Put Blob, or Put Block * List. */ @JsonProperty(value = "x-ms-copy-completion-time") private OffsetDateTime copyCompletionTime; /* * Only appears when x-ms-copy-status is failed or pending. Describes the * cause of the last fatal or non-fatal copy operation failure. This header * does not appear if this blob has never been the destination in a Copy * Blob operation, or if this blob has been modified after a concluded Copy * Blob operation using Set Blob Properties, Put Blob, or Put Block List */ @JsonProperty(value = "x-ms-copy-status-description") private String copyStatusDescription; /* * String identifier for this copy operation. Use with Get Blob Properties * to check the status of this copy operation, or pass to Abort Copy Blob * to abort a pending copy. 
*/ @JsonProperty(value = "x-ms-copy-id") private String copyId; /* * Contains the number of bytes copied and the total bytes in the source in * the last attempted Copy Blob operation where this blob was the * destination blob. Can show between 0 and Content-Length bytes copied. * This header does not appear if this blob has never been the destination * in a Copy Blob operation, or if this blob has been modified after a * concluded Copy Blob operation using Set Blob Properties, Put Blob, or * Put Block List */ @JsonProperty(value = "x-ms-copy-progress") private String copyProgress; /* * URL up to 2 KB in length that specifies the source blob or file used in * the last attempted Copy Blob operation where this blob was the * destination blob. This header does not appear if this blob has never * been the destination in a Copy Blob operation, or if this blob has been * modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. */ @JsonProperty(value = "x-ms-copy-source") private String copySource; /* * State of the copy operation identified by x-ms-copy-id. Possible values * include: 'pending', 'success', 'aborted', 'failed' */ @JsonProperty(value = "x-ms-copy-status") private CopyStatusType copyStatus; /* * When a blob is leased, specifies whether the lease is of infinite or * fixed duration. Possible values include: 'infinite', 'fixed' */ @JsonProperty(value = "x-ms-lease-duration") private LeaseDurationType leaseDuration; /* * Lease state of the blob. Possible values include: 'available', 'leased', * 'expired', 'breaking', 'broken' */ @JsonProperty(value = "x-ms-lease-state") private LeaseStateType leaseState; /* * The current lease status of the blob. Possible values include: 'locked', * 'unlocked' */ @JsonProperty(value = "x-ms-lease-status") private LeaseStatusType leaseStatus; /* * If a client request id header is sent in the request, this header will * be present in the response with the same value. 
*/ @JsonProperty(value = "x-ms-client-request-id") private String clientRequestId; /* * This header uniquely identifies the request that was made and can be * used for troubleshooting the request. */ @JsonProperty(value = "x-ms-request-id") private String requestId; /* * Indicates the version of the Blob service used to execute the request. * This header is returned for requests made against version 2009-09-19 and * above. */ @JsonProperty(value = "x-ms-version") private String version; /* * A DateTime value returned by the service that uniquely identifies the * blob. The value of this header indicates the blob version, and may be * used in subsequent requests to access this version of the blob. */ @JsonProperty(value = "x-ms-version-id") private String versionId; /* * Indicates that the service supports requests for partial blob content. */ @JsonProperty(value = "Accept-Ranges") private String acceptRanges; /* * UTC date/time value generated by the service that indicates the time at * which the response was initiated */ @JsonProperty(value = "Date") private OffsetDateTime dateProperty; /* * The number of committed blocks present in the blob. This header is * returned only for append blobs. */ @JsonProperty(value = "x-ms-blob-committed-block-count") private Integer blobCommittedBlockCount; /* * The value of this header is set to true if the blob data and application * metadata are completely encrypted using the specified algorithm. * Otherwise, the value is set to false (when the blob is unencrypted, or * if only parts of the blob/application metadata are encrypted). */ @JsonProperty(value = "x-ms-server-encrypted") private Boolean isServerEncrypted; /* * The SHA-256 hash of the encryption key used to encrypt the blob. This * header is only returned when the blob was encrypted with a * customer-provided key. 
*/ @JsonProperty(value = "x-ms-encryption-key-sha256") private String encryptionKeySha256; /* * Returns the name of the encryption scope used to encrypt the blob * contents and application metadata. Note that the absence of this header * implies use of the default account encryption scope. */ @JsonProperty(value = "x-ms-encryption-scope") private String encryptionScope; /* * If the blob has a MD5 hash, and if request contains range header (Range * or x-ms-range), this response header is returned with the value of the * whole blob's MD5 value. This value may or may not be equal to the value * returned in Content-MD5 header, with the latter calculated from the * requested range */ @JsonProperty(value = "x-ms-blob-content-md5") private byte[] blobContentMD5; /* * The number of tags associated with the blob */ @JsonProperty(value = "x-ms-tag-count") private Long tagCount; /* * If the request is to read a specified range and the * x-ms-range-get-content-crc64 is set to true, then the request returns a * crc64 for the range, as long as the range size is less than or equal to * 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 * is specified in the same request, it will fail with 400(Bad Request) */ @JsonProperty(value = "x-ms-content-crc64") private byte[] contentCrc64; /* * The errorCode property. */ @JsonProperty(value = "x-ms-error-code") private String errorCode; /** * Get the lastModified property: Returns the date and time the container * was last modified. Any operation that modifies the blob, including an * update of the blob's metadata or properties, changes the last-modified * time of the blob. * * @return the lastModified value. */ public OffsetDateTime getLastModified() { return this.lastModified; } /** * Set the lastModified property: Returns the date and time the container * was last modified. Any operation that modifies the blob, including an * update of the blob's metadata or properties, changes the last-modified * time of the blob. 
* * @param lastModified the lastModified value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLastModified(OffsetDateTime lastModified) { this.lastModified = lastModified; return this; } /** * Get the metadata property: The metadata property. * * @return the metadata value. */ public Map<String, String> getMetadata() { return this.metadata; } /** * Set the metadata property: The metadata property. * * @param metadata the metadata value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setMetadata(Map<String, String> metadata) { this.metadata = metadata; return this; } /** * Get the objectReplicationDestinationPolicyId property: Optional. Only * valid when Object Replication is enabled for the storage container and * on the destination blob of the replication. * * @return the objectReplicationDestinationPolicyId value. */ public String getObjectReplicationDestinationPolicyId() { return this.objectReplicationDestinationPolicyId; } /** * Set the objectReplicationDestinationPolicyId property: Optional. Only * valid when Object Replication is enabled for the storage container and * on the destination blob of the replication. * * @param objectReplicationDestinationPolicyId the * objectReplicationDestinationPolicyId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationDestinationPolicyId(String objectReplicationDestinationPolicyId) { this.objectReplicationDestinationPolicyId = objectReplicationDestinationPolicyId; return this; } /** * Get the objectReplicationPolicies property: The * objectReplicationPolicies property. * * @return the objectReplicationPolicies value. */ public List<ObjectReplicationPolicy> getObjectReplicationPolicies() { return this.objectReplicationPolicies; } /** * Set the objectReplicationPolicies property: The * objectReplicationPolicies property. 
* * @param objectReplicationPolicies the objectReplicationPolicies value * to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationPolicies(List<ObjectReplicationPolicy> objectReplicationPolicies) { this.objectReplicationPolicies = objectReplicationPolicies; return this; } /** * Get the contentLength property: The number of bytes present in the * response body. * * @return the contentLength value. */ public Long getContentLength() { return this.contentLength; } /** * Set the contentLength property: The number of bytes present in the * response body. * * @param contentLength the contentLength value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLength(Long contentLength) { this.contentLength = contentLength; return this; } /** * Get the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @return the contentType value. */ public String getContentType() { return this.contentType; } /** * Set the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @param contentType the contentType value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentType(String contentType) { this.contentType = contentType; return this; } /** * Get the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @return the contentRange value. */ public String getContentRange() { return this.contentRange; } /** * Set the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @param contentRange the contentRange value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setContentRange(String contentRange) { this.contentRange = contentRange; return this; } /** * Get the eTag property: The ETag contains a value that you can use to * perform operations conditionally. If the request version is 2011-08-18 * or newer, the ETag value will be in quotes. * * @return the eTag value. */ public String getETag() { return this.eTag; } /** * Set the eTag property: The ETag contains a value that you can use to * perform operations conditionally. If the request version is 2011-08-18 * or newer, the ETag value will be in quotes. * * @param eTag the eTag value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setETag(String eTag) { this.eTag = eTag; return this; } /** * Get the contentMd5 property: If the blob has an MD5 hash and this * operation is to read the full blob, this response header is returned so * that the client can check for message content integrity. * * @return the contentMd5 value. */ public byte[] getContentMd5() { return CoreUtils.clone(this.contentMd5); } /** * Set the contentMd5 property: If the blob has an MD5 hash and this * operation is to read the full blob, this response header is returned so * that the client can check for message content integrity. * * @param contentMd5 the contentMd5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentMd5(byte[] contentMd5) { this.contentMd5 = CoreUtils.clone(contentMd5); return this; } /** * Get the contentEncoding property: This header returns the value that was * specified for the Content-Encoding request header. * * @return the contentEncoding value. */ public String getContentEncoding() { return this.contentEncoding; } /** * Set the contentEncoding property: This header returns the value that was * specified for the Content-Encoding request header. * * @param contentEncoding the contentEncoding value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setContentEncoding(String contentEncoding) { this.contentEncoding = contentEncoding; return this; } /** * Get the cacheControl property: This header is returned if it was * previously specified for the blob. * * @return the cacheControl value. */ public String getCacheControl() { return this.cacheControl; } /** * Set the cacheControl property: This header is returned if it was * previously specified for the blob. * * @param cacheControl the cacheControl value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCacheControl(String cacheControl) { this.cacheControl = cacheControl; return this; } /** * Get the contentDisposition property: This header returns the value that * was specified for the 'x-ms-blob-content-disposition' header. The * Content-Disposition response header field conveys additional information * about how to process the response payload, and also can be used to * attach additional metadata. For example, if set to attachment, it * indicates that the user-agent should not display the response, but * instead show a Save As dialog with a filename other than the blob name * specified. * * @return the contentDisposition value. */ public String getContentDisposition() { return this.contentDisposition; } /** * Set the contentDisposition property: This header returns the value that * was specified for the 'x-ms-blob-content-disposition' header. The * Content-Disposition response header field conveys additional information * about how to process the response payload, and also can be used to * attach additional metadata. For example, if set to attachment, it * indicates that the user-agent should not display the response, but * instead show a Save As dialog with a filename other than the blob name * specified. * * @param contentDisposition the contentDisposition value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setContentDisposition(String contentDisposition) { this.contentDisposition = contentDisposition; return this; } /** * Get the contentLanguage property: This header returns the value that was * specified for the Content-Language request header. * * @return the contentLanguage value. */ public String getContentLanguage() { return this.contentLanguage; } /** * Set the contentLanguage property: This header returns the value that was * specified for the Content-Language request header. * * @param contentLanguage the contentLanguage value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLanguage(String contentLanguage) { this.contentLanguage = contentLanguage; return this; } /** * Get the blobSequenceNumber property: The current sequence number for a * page blob. This header is not returned for block blobs or append blobs. * * @return the blobSequenceNumber value. */ public Long getBlobSequenceNumber() { return this.blobSequenceNumber; } /** * Set the blobSequenceNumber property: The current sequence number for a * page blob. This header is not returned for block blobs or append blobs. * * @param blobSequenceNumber the blobSequenceNumber value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobSequenceNumber(Long blobSequenceNumber) { this.blobSequenceNumber = blobSequenceNumber; return this; } /** * Get the blobType property: The blob's type. Possible values include: * 'BlockBlob', 'PageBlob', 'AppendBlob'. * * @return the blobType value. */ public BlobType getBlobType() { return this.blobType; } /** * Set the blobType property: The blob's type. Possible values include: * 'BlockBlob', 'PageBlob', 'AppendBlob'. * * @param blobType the blobType value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setBlobType(BlobType blobType) { this.blobType = blobType; return this; } /** * Get the copyCompletionTime property: Conclusion time of the last * attempted Copy Blob operation where this blob was the destination blob. * This value can specify the time of a completed, aborted, or failed copy * attempt. This header does not appear if a copy is pending, if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @return the copyCompletionTime value. */ public OffsetDateTime getCopyCompletionTime() { return this.copyCompletionTime; } /** * Set the copyCompletionTime property: Conclusion time of the last * attempted Copy Blob operation where this blob was the destination blob. * This value can specify the time of a completed, aborted, or failed copy * attempt. This header does not appear if a copy is pending, if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @param copyCompletionTime the copyCompletionTime value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyCompletionTime(OffsetDateTime copyCompletionTime) { this.copyCompletionTime = copyCompletionTime; return this; } /** * Get the copyStatusDescription property: Only appears when * x-ms-copy-status is failed or pending. Describes the cause of the last * fatal or non-fatal copy operation failure. This header does not appear * if this blob has never been the destination in a Copy Blob operation, or * if this blob has been modified after a concluded Copy Blob operation * using Set Blob Properties, Put Blob, or Put Block List. * * @return the copyStatusDescription value. 
*/ public String getCopyStatusDescription() { return this.copyStatusDescription; } /** * Set the copyStatusDescription property: Only appears when * x-ms-copy-status is failed or pending. Describes the cause of the last * fatal or non-fatal copy operation failure. This header does not appear * if this blob has never been the destination in a Copy Blob operation, or * if this blob has been modified after a concluded Copy Blob operation * using Set Blob Properties, Put Blob, or Put Block List. * * @param copyStatusDescription the copyStatusDescription value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyStatusDescription(String copyStatusDescription) { this.copyStatusDescription = copyStatusDescription; return this; } /** * Get the copyId property: String identifier for this copy operation. Use * with Get Blob Properties to check the status of this copy operation, or * pass to Abort Copy Blob to abort a pending copy. * * @return the copyId value. */ public String getCopyId() { return this.copyId; } /** * Set the copyId property: String identifier for this copy operation. Use * with Get Blob Properties to check the status of this copy operation, or * pass to Abort Copy Blob to abort a pending copy. * * @param copyId the copyId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyId(String copyId) { this.copyId = copyId; return this; } /** * Get the copyProgress property: Contains the number of bytes copied and * the total bytes in the source in the last attempted Copy Blob operation * where this blob was the destination blob. Can show between 0 and * Content-Length bytes copied. This header does not appear if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @return the copyProgress value. 
*/ public String getCopyProgress() { return this.copyProgress; } /** * Set the copyProgress property: Contains the number of bytes copied and * the total bytes in the source in the last attempted Copy Blob operation * where this blob was the destination blob. Can show between 0 and * Content-Length bytes copied. This header does not appear if this blob * has never been the destination in a Copy Blob operation, or if this blob * has been modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. * * @param copyProgress the copyProgress value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyProgress(String copyProgress) { this.copyProgress = copyProgress; return this; } /** * Get the copySource property: URL up to 2 KB in length that specifies the * source blob or file used in the last attempted Copy Blob operation where * this blob was the destination blob. This header does not appear if this * blob has never been the destination in a Copy Blob operation, or if this * blob has been modified after a concluded Copy Blob operation using Set * Blob Properties, Put Blob, or Put Block List. * * @return the copySource value. */ public String getCopySource() { return this.copySource; } /** * Set the copySource property: URL up to 2 KB in length that specifies the * source blob or file used in the last attempted Copy Blob operation where * this blob was the destination blob. This header does not appear if this * blob has never been the destination in a Copy Blob operation, or if this * blob has been modified after a concluded Copy Blob operation using Set * Blob Properties, Put Blob, or Put Block List. * * @param copySource the copySource value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setCopySource(String copySource) { this.copySource = copySource; return this; } /** * Get the copyStatus property: State of the copy operation identified by * x-ms-copy-id. Possible values include: 'pending', 'success', 'aborted', * 'failed'. * * @return the copyStatus value. */ public CopyStatusType getCopyStatus() { return this.copyStatus; } /** * Set the copyStatus property: State of the copy operation identified by * x-ms-copy-id. Possible values include: 'pending', 'success', 'aborted', * 'failed'. * * @param copyStatus the copyStatus value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setCopyStatus(CopyStatusType copyStatus) { this.copyStatus = copyStatus; return this; } /** * Get the leaseDuration property: When a blob is leased, specifies whether * the lease is of infinite or fixed duration. Possible values include: * 'infinite', 'fixed'. * * @return the leaseDuration value. */ public LeaseDurationType getLeaseDuration() { return this.leaseDuration; } /** * Set the leaseDuration property: When a blob is leased, specifies whether * the lease is of infinite or fixed duration. Possible values include: * 'infinite', 'fixed'. * * @param leaseDuration the leaseDuration value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseDuration(LeaseDurationType leaseDuration) { this.leaseDuration = leaseDuration; return this; } /** * Get the leaseState property: Lease state of the blob. Possible values * include: 'available', 'leased', 'expired', 'breaking', 'broken'. * * @return the leaseState value. */ public LeaseStateType getLeaseState() { return this.leaseState; } /** * Set the leaseState property: Lease state of the blob. Possible values * include: 'available', 'leased', 'expired', 'breaking', 'broken'. * * @param leaseState the leaseState value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setLeaseState(LeaseStateType leaseState) { this.leaseState = leaseState; return this; } /** * Get the leaseStatus property: The current lease status of the blob. * Possible values include: 'locked', 'unlocked'. * * @return the leaseStatus value. */ public LeaseStatusType getLeaseStatus() { return this.leaseStatus; } /** * Set the leaseStatus property: The current lease status of the blob. * Possible values include: 'locked', 'unlocked'. * * @param leaseStatus the leaseStatus value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setLeaseStatus(LeaseStatusType leaseStatus) { this.leaseStatus = leaseStatus; return this; } /** * Get the clientRequestId property: If a client request id header is sent * in the request, this header will be present in the response with the * same value. * * @return the clientRequestId value. */ public String getClientRequestId() { return this.clientRequestId; } /** * Set the clientRequestId property: If a client request id header is sent * in the request, this header will be present in the response with the * same value. * * @param clientRequestId the clientRequestId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setClientRequestId(String clientRequestId) { this.clientRequestId = clientRequestId; return this; } /** * Get the requestId property: This header uniquely identifies the request * that was made and can be used for troubleshooting the request. * * @return the requestId value. */ public String getRequestId() { return this.requestId; } /** * Set the requestId property: This header uniquely identifies the request * that was made and can be used for troubleshooting the request. * * @param requestId the requestId value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setRequestId(String requestId) { this.requestId = requestId; return this; } /** * Get the version property: Indicates the version of the Blob service used * to execute the request. This header is returned for requests made * against version 2009-09-19 and above. * * @return the version value. */ public String getVersion() { return this.version; } /** * Set the version property: Indicates the version of the Blob service used * to execute the request. This header is returned for requests made * against version 2009-09-19 and above. * * @param version the version value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setVersion(String version) { this.version = version; return this; } /** * Get the versionId property: A DateTime value returned by the service * that uniquely identifies the blob. The value of this header indicates * the blob version, and may be used in subsequent requests to access this * version of the blob. * * @return the versionId value. */ public String getVersionId() { return this.versionId; } /** * Set the versionId property: A DateTime value returned by the service * that uniquely identifies the blob. The value of this header indicates * the blob version, and may be used in subsequent requests to access this * version of the blob. * * @param versionId the versionId value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setVersionId(String versionId) { this.versionId = versionId; return this; } /** * Get the acceptRanges property: Indicates that the service supports * requests for partial blob content. * * @return the acceptRanges value. */ public String getAcceptRanges() { return this.acceptRanges; } /** * Set the acceptRanges property: Indicates that the service supports * requests for partial blob content. * * @param acceptRanges the acceptRanges value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setAcceptRanges(String acceptRanges) { this.acceptRanges = acceptRanges; return this; } /** * Get the dateProperty property: UTC date/time value generated by the * service that indicates the time at which the response was initiated. * * @return the dateProperty value. */ public OffsetDateTime getDateProperty() { return this.dateProperty; } /** * Set the dateProperty property: UTC date/time value generated by the * service that indicates the time at which the response was initiated. * * @param dateProperty the dateProperty value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setDateProperty(OffsetDateTime dateProperty) { this.dateProperty = dateProperty; return this; } /** * Get the blobCommittedBlockCount property: The number of committed blocks * present in the blob. This header is returned only for append blobs. * * @return the blobCommittedBlockCount value. */ public Integer getBlobCommittedBlockCount() { return this.blobCommittedBlockCount; } /** * Set the blobCommittedBlockCount property: The number of committed blocks * present in the blob. This header is returned only for append blobs. * * @param blobCommittedBlockCount the blobCommittedBlockCount value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobCommittedBlockCount(Integer blobCommittedBlockCount) { this.blobCommittedBlockCount = blobCommittedBlockCount; return this; } /** * Get the isServerEncrypted property: The value of this header is set to * true if the blob data and application metadata are completely encrypted * using the specified algorithm. Otherwise, the value is set to false * (when the blob is unencrypted, or if only parts of the blob/application * metadata are encrypted). * * @return the isServerEncrypted value. 
*/ public Boolean isServerEncrypted() { return this.isServerEncrypted; } /** * Set the isServerEncrypted property: The value of this header is set to * true if the blob data and application metadata are completely encrypted * using the specified algorithm. Otherwise, the value is set to false * (when the blob is unencrypted, or if only parts of the blob/application * metadata are encrypted). * * @param isServerEncrypted the isServerEncrypted value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setIsServerEncrypted(Boolean isServerEncrypted) { this.isServerEncrypted = isServerEncrypted; return this; } /** * Get the encryptionKeySha256 property: The SHA-256 hash of the encryption * key used to encrypt the blob. This header is only returned when the blob * was encrypted with a customer-provided key. * * @return the encryptionKeySha256 value. */ public String getEncryptionKeySha256() { return this.encryptionKeySha256; } /** * Set the encryptionKeySha256 property: The SHA-256 hash of the encryption * key used to encrypt the blob. This header is only returned when the blob * was encrypted with a customer-provided key. * * @param encryptionKeySha256 the encryptionKeySha256 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionKeySha256(String encryptionKeySha256) { this.encryptionKeySha256 = encryptionKeySha256; return this; } /** * Get the encryptionScope property: Returns the name of the encryption * scope used to encrypt the blob contents and application metadata. Note * that the absence of this header implies use of the default account * encryption scope. * * @return the encryptionScope value. */ public String getEncryptionScope() { return this.encryptionScope; } /** * Set the encryptionScope property: Returns the name of the encryption * scope used to encrypt the blob contents and application metadata. 
Note * that the absence of this header implies use of the default account * encryption scope. * * @param encryptionScope the encryptionScope value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionScope(String encryptionScope) { this.encryptionScope = encryptionScope; return this; } /** * Get the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @return the blobContentMD5 value. */ public byte[] getBlobContentMD5() { return CoreUtils.clone(this.blobContentMD5); } /** * Set the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @param blobContentMD5 the blobContentMD5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobContentMD5(byte[] blobContentMD5) { this.blobContentMD5 = CoreUtils.clone(blobContentMD5); return this; } /** * Get the tagCount property: The number of tags associated with the blob. * * @return the tagCount value. */ public Long getTagCount() { return this.tagCount; } /** * Set the tagCount property: The number of tags associated with the blob. * * @param tagCount the tagCount value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setTagCount(Long tagCount) { this.tagCount = tagCount; return this; } /** * Get the contentCrc64 property: If the request is to read a specified * range and the x-ms-range-get-content-crc64 is set to true, then the * request returns a crc64 for the range, as long as the range size is less * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp; * x-ms-range-get-content-md5 is specified in the same request, it will * fail with 400(Bad Request). * * @return the contentCrc64 value. */ public byte[] getContentCrc64() { return CoreUtils.clone(this.contentCrc64); } /** * Set the contentCrc64 property: If the request is to read a specified * range and the x-ms-range-get-content-crc64 is set to true, then the * request returns a crc64 for the range, as long as the range size is less * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp; * x-ms-range-get-content-md5 is specified in the same request, it will * fail with 400(Bad Request). * * @param contentCrc64 the contentCrc64 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentCrc64(byte[] contentCrc64) { this.contentCrc64 = CoreUtils.clone(contentCrc64); return this; } /** * Get the errorCode property: The errorCode property. * * @return the errorCode value. */ public String getErrorCode() { return this.errorCode; } /** * Set the errorCode property: The errorCode property. * * @param errorCode the errorCode value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setErrorCode(String errorCode) { this.errorCode = errorCode; return this; } }
class BlobDownloadHeaders {
    /**
     * Instantiates an empty {@code BlobDownloadHeaders}.
     */
    public BlobDownloadHeaders() {
        // NOTE(review): the original file carried this Javadoc with no constructor
        // beneath it; the no-arg constructor is restored so the type can be built
        // via its fluent setters (backward-compatible widening).
    }

    /**
     * Instantiates a {@code BlobDownloadHeaders} object based on the generated, internal version of the type.
     *
     * @param headers The generated headers type from which to extract values.
     */
    public BlobDownloadHeaders(com.azure.storage.blob.implementation.models.BlobDownloadHeaders headers) {
        /*
         * We have these two types because we needed to update this interface in a way that could not be generated
         * (getObjectReplicationSourcePolicies), so we switched to generating BlobDownloadHeaders into implementation
         * and wrapping it. Because it's headers type, we couldn't change the name of the generated type.
         */
        this.lastModified = headers.getLastModified();
        this.metadata = headers.getMetadata();
        this.eTag = headers.getETag();
        this.contentLength = headers.getContentLength();
        this.contentType = headers.getContentType();
        this.contentRange = headers.getContentRange();
        this.contentEncoding = headers.getContentEncoding();
        this.contentLanguage = headers.getContentLanguage();
        this.contentMd5 = headers.getContentMd5();
        this.contentDisposition = headers.getContentDisposition();
        this.cacheControl = headers.getCacheControl();
        this.blobSequenceNumber = headers.getBlobSequenceNumber();
        this.blobType = headers.getBlobType();
        this.leaseStatus = headers.getLeaseStatus();
        this.leaseState = headers.getLeaseState();
        this.leaseDuration = headers.getLeaseDuration();
        this.copyId = headers.getCopyId();
        this.copyStatus = headers.getCopyStatus();
        this.copySource = headers.getCopySource();
        this.copyProgress = headers.getCopyProgress();
        this.copyCompletionTime = headers.getCopyCompletionTime();
        this.copyStatusDescription = headers.getCopyStatusDescription();
        this.isServerEncrypted = headers.isServerEncrypted();
        this.clientRequestId = headers.getClientRequestId();
        this.requestId = headers.getRequestId();
        this.version = headers.getVersion();
        this.versionId = headers.getVersionId();
        this.acceptRanges = headers.getAcceptRanges();
        this.dateProperty = headers.getDateProperty();
        this.blobCommittedBlockCount = headers.getBlobCommittedBlockCount();
        this.encryptionKeySha256 = headers.getEncryptionKeySha256();
        this.encryptionScope = headers.getEncryptionScope();
        this.blobContentMD5 = headers.getBlobContentMD5();
        this.contentCrc64 = headers.getContentCrc64();
        this.errorCode = headers.getErrorCode();
        this.tagCount = headers.getTagCount();

        // Parse the "x-ms-or-" header collection. A bare "policy-id" key marks this
        // blob as a replication *destination*; otherwise each key is expected to be
        // "<policyId>_<ruleId>" describing a source-side replication rule.
        Map<String, String> objectReplicationStatus = headers.getObjectReplicationRules();
        Map<String, List<ObjectReplicationRule>> internalSourcePolicies = new HashMap<>();
        objectReplicationStatus = objectReplicationStatus == null ? new HashMap<>() : objectReplicationStatus;
        this.objectReplicationDestinationPolicyId = objectReplicationStatus.getOrDefault("policy-id", null);
        if (this.objectReplicationDestinationPolicyId == null) {
            for (Map.Entry<String, String> entry : objectReplicationStatus.entrySet()) {
                String[] split = entry.getKey().split("_");
                if (split.length != 2) {
                    // Guard added: a key not shaped "<policyId>_<ruleId>" previously
                    // threw ArrayIndexOutOfBoundsException; skip it instead.
                    continue;
                }
                String policyId = split[0];
                String ruleId = split[1];
                ObjectReplicationRule rule =
                    new ObjectReplicationRule(ruleId, ObjectReplicationStatus.fromString(entry.getValue()));
                internalSourcePolicies.computeIfAbsent(policyId, ignored -> new ArrayList<>()).add(rule);
            }
        }
        this.objectReplicationSourcePolicies = new ArrayList<>();
        for (Map.Entry<String, List<ObjectReplicationRule>> entry : internalSourcePolicies.entrySet()) {
            this.objectReplicationSourcePolicies.add(new ObjectReplicationPolicy(entry.getKey(), entry.getValue()));
        }
    }

    /*
     * Returns the date and time the container was last modified. Any operation
     * that modifies the blob, including an update of the blob's metadata or
     * properties, changes the last-modified time of the blob.
     */
    @JsonProperty(value = "Last-Modified")
    private OffsetDateTime lastModified;

    /*
     * The metadata property.
     */
    @HeaderCollection("x-ms-meta-")
    private Map<String, String> metadata;

    /*
     * Optional.
Only valid when Object Replication is enabled for the storage * container and on the destination blob of the replication. */ @JsonProperty(value = "x-ms-or-policy-id") private String objectReplicationDestinationPolicyId; /* * The objectReplicationRuleStatus property. */ @HeaderCollection("x-ms-or-") private List<ObjectReplicationPolicy> objectReplicationSourcePolicies; /* * The number of bytes present in the response body. */ @JsonProperty(value = "Content-Length") private Long contentLength; /* * The media type of the body of the response. For Download Blob this is * 'application/octet-stream' */ @JsonProperty(value = "Content-Type") private String contentType; /* * Indicates the range of bytes returned in the event that the client * requested a subset of the blob by setting the 'Range' request header. */ @JsonProperty(value = "Content-Range") private String contentRange; /* * The ETag contains a value that you can use to perform operations * conditionally. If the request version is 2011-08-18 or newer, the ETag * value will be in quotes. */ @JsonProperty(value = "ETag") private String eTag; /* * If the blob has an MD5 hash and this operation is to read the full blob, * this response header is returned so that the client can check for * message content integrity. */ @JsonProperty(value = "Content-MD5") private byte[] contentMd5; /* * This header returns the value that was specified for the * Content-Encoding request header */ @JsonProperty(value = "Content-Encoding") private String contentEncoding; /* * This header is returned if it was previously specified for the blob. */ @JsonProperty(value = "Cache-Control") private String cacheControl; /* * This header returns the value that was specified for the * 'x-ms-blob-content-disposition' header. The Content-Disposition response * header field conveys additional information about how to process the * response payload, and also can be used to attach additional metadata. 
* For example, if set to attachment, it indicates that the user-agent * should not display the response, but instead show a Save As dialog with * a filename other than the blob name specified. */ @JsonProperty(value = "Content-Disposition") private String contentDisposition; /* * This header returns the value that was specified for the * Content-Language request header. */ @JsonProperty(value = "Content-Language") private String contentLanguage; /* * The current sequence number for a page blob. This header is not returned * for block blobs or append blobs */ @JsonProperty(value = "x-ms-blob-sequence-number") private Long blobSequenceNumber; /* * The blob's type. Possible values include: 'BlockBlob', 'PageBlob', * 'AppendBlob' */ @JsonProperty(value = "x-ms-blob-type") private BlobType blobType; /* * Conclusion time of the last attempted Copy Blob operation where this * blob was the destination blob. This value can specify the time of a * completed, aborted, or failed copy attempt. This header does not appear * if a copy is pending, if this blob has never been the destination in a * Copy Blob operation, or if this blob has been modified after a concluded * Copy Blob operation using Set Blob Properties, Put Blob, or Put Block * List. */ @JsonProperty(value = "x-ms-copy-completion-time") private OffsetDateTime copyCompletionTime; /* * Only appears when x-ms-copy-status is failed or pending. Describes the * cause of the last fatal or non-fatal copy operation failure. This header * does not appear if this blob has never been the destination in a Copy * Blob operation, or if this blob has been modified after a concluded Copy * Blob operation using Set Blob Properties, Put Blob, or Put Block List */ @JsonProperty(value = "x-ms-copy-status-description") private String copyStatusDescription; /* * String identifier for this copy operation. Use with Get Blob Properties * to check the status of this copy operation, or pass to Abort Copy Blob * to abort a pending copy. 
*/ @JsonProperty(value = "x-ms-copy-id") private String copyId; /* * Contains the number of bytes copied and the total bytes in the source in * the last attempted Copy Blob operation where this blob was the * destination blob. Can show between 0 and Content-Length bytes copied. * This header does not appear if this blob has never been the destination * in a Copy Blob operation, or if this blob has been modified after a * concluded Copy Blob operation using Set Blob Properties, Put Blob, or * Put Block List */ @JsonProperty(value = "x-ms-copy-progress") private String copyProgress; /* * URL up to 2 KB in length that specifies the source blob or file used in * the last attempted Copy Blob operation where this blob was the * destination blob. This header does not appear if this blob has never * been the destination in a Copy Blob operation, or if this blob has been * modified after a concluded Copy Blob operation using Set Blob * Properties, Put Blob, or Put Block List. */ @JsonProperty(value = "x-ms-copy-source") private String copySource; /* * State of the copy operation identified by x-ms-copy-id. Possible values * include: 'pending', 'success', 'aborted', 'failed' */ @JsonProperty(value = "x-ms-copy-status") private CopyStatusType copyStatus; /* * When a blob is leased, specifies whether the lease is of infinite or * fixed duration. Possible values include: 'infinite', 'fixed' */ @JsonProperty(value = "x-ms-lease-duration") private LeaseDurationType leaseDuration; /* * Lease state of the blob. Possible values include: 'available', 'leased', * 'expired', 'breaking', 'broken' */ @JsonProperty(value = "x-ms-lease-state") private LeaseStateType leaseState; /* * The current lease status of the blob. Possible values include: 'locked', * 'unlocked' */ @JsonProperty(value = "x-ms-lease-status") private LeaseStatusType leaseStatus; /* * If a client request id header is sent in the request, this header will * be present in the response with the same value. 
*/ @JsonProperty(value = "x-ms-client-request-id") private String clientRequestId; /* * This header uniquely identifies the request that was made and can be * used for troubleshooting the request. */ @JsonProperty(value = "x-ms-request-id") private String requestId; /* * Indicates the version of the Blob service used to execute the request. * This header is returned for requests made against version 2009-09-19 and * above. */ @JsonProperty(value = "x-ms-version") private String version; /* * A DateTime value returned by the service that uniquely identifies the * blob. The value of this header indicates the blob version, and may be * used in subsequent requests to access this version of the blob. */ @JsonProperty(value = "x-ms-version-id") private String versionId; /* * Indicates that the service supports requests for partial blob content. */ @JsonProperty(value = "Accept-Ranges") private String acceptRanges; /* * UTC date/time value generated by the service that indicates the time at * which the response was initiated */ @JsonProperty(value = "Date") private OffsetDateTime dateProperty; /* * The number of committed blocks present in the blob. This header is * returned only for append blobs. */ @JsonProperty(value = "x-ms-blob-committed-block-count") private Integer blobCommittedBlockCount; /* * The value of this header is set to true if the blob data and application * metadata are completely encrypted using the specified algorithm. * Otherwise, the value is set to false (when the blob is unencrypted, or * if only parts of the blob/application metadata are encrypted). */ @JsonProperty(value = "x-ms-server-encrypted") private Boolean isServerEncrypted; /* * The SHA-256 hash of the encryption key used to encrypt the blob. This * header is only returned when the blob was encrypted with a * customer-provided key. 
*/
    @JsonProperty(value = "x-ms-encryption-key-sha256")
    private String encryptionKeySha256;

    /*
     * Name of the encryption scope used to encrypt blob contents and
     * application metadata; absence implies the default account scope.
     */
    @JsonProperty(value = "x-ms-encryption-scope")
    private String encryptionScope;

    /*
     * Whole-blob MD5 hash, returned when the request carried a range header
     * (Range or x-ms-range); may differ from Content-MD5, which covers only
     * the requested range.
     */
    @JsonProperty(value = "x-ms-blob-content-md5")
    private byte[] blobContentMD5;

    /*
     * The number of tags associated with the blob.
     */
    @JsonProperty(value = "x-ms-tag-count")
    private Long tagCount;

    /*
     * CRC64 of the returned range, present when x-ms-range-get-content-crc64
     * is true and the range is at most 4 MB; requesting both crc64 and md5
     * fails with 400 (Bad Request).
     */
    @JsonProperty(value = "x-ms-content-crc64")
    private byte[] contentCrc64;

    /*
     * The errorCode property.
     */
    @JsonProperty(value = "x-ms-error-code")
    private String errorCode;

    /**
     * Gets the date and time the container was last modified. Any operation
     * that modifies the blob, including metadata or property updates, changes
     * this value.
     *
     * @return the lastModified value.
     */
    public OffsetDateTime getLastModified() {
        return this.lastModified;
    }

    /**
     * Sets the last-modified time of the blob.
     *
     * @param lastModified the lastModified value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setLastModified(OffsetDateTime lastModified) {
        this.lastModified = lastModified;
        return this;
    }

    /**
     * Gets the blob's metadata.
     *
     * @return the metadata value.
     */
    public Map<String, String> getMetadata() {
        return this.metadata;
    }

    /**
     * Sets the blob's metadata.
     *
     * @param metadata the metadata value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setMetadata(Map<String, String> metadata) {
        this.metadata = metadata;
        return this;
    }

    /**
     * Gets the destination replication policy id. Optional; only valid when
     * Object Replication is enabled for the storage container and on the
     * destination blob of the replication.
     *
     * @return the objectReplicationDestinationPolicyId value.
     */
    public String getObjectReplicationDestinationPolicyId() {
        return this.objectReplicationDestinationPolicyId;
    }

    /**
     * Sets the destination replication policy id.
     *
     * @param objectReplicationDestinationPolicyId the
     * objectReplicationDestinationPolicyId value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setObjectReplicationDestinationPolicyId(String objectReplicationDestinationPolicyId) {
        this.objectReplicationDestinationPolicyId = objectReplicationDestinationPolicyId;
        return this;
    }

    /**
     * Gets the source-side object replication policies parsed from the
     * "x-ms-or-" headers.
     *
     * @return the objectReplicationSourcePolicies value.
*/ public List<ObjectReplicationPolicy> getObjectReplicationSourcePolicies() { return Collections.unmodifiableList(this.objectReplicationSourcePolicies); } /** * Set the objectReplicationSourcePolicies property: The * objectReplicationSourcePolicies property. * * @param objectReplicationSourcePolicies the objectReplicationSourcePolicies value * to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setObjectReplicationSourcePolicies( List<ObjectReplicationPolicy> objectReplicationSourcePolicies) { this.objectReplicationSourcePolicies = objectReplicationSourcePolicies; return this; } /** * Get the contentLength property: The number of bytes present in the * response body. * * @return the contentLength value. */ public Long getContentLength() { return this.contentLength; } /** * Set the contentLength property: The number of bytes present in the * response body. * * @param contentLength the contentLength value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentLength(Long contentLength) { this.contentLength = contentLength; return this; } /** * Get the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @return the contentType value. */ public String getContentType() { return this.contentType; } /** * Set the contentType property: The media type of the body of the * response. For Download Blob this is 'application/octet-stream'. * * @param contentType the contentType value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentType(String contentType) { this.contentType = contentType; return this; } /** * Get the contentRange property: Indicates the range of bytes returned in * the event that the client requested a subset of the blob by setting the * 'Range' request header. * * @return the contentRange value. 
*/
    public String getContentRange() {
        return this.contentRange;
    }

    /**
     * Sets the byte range returned for a partial ('Range' header) download.
     *
     * @param contentRange the contentRange value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setContentRange(String contentRange) {
        this.contentRange = contentRange;
        return this;
    }

    /**
     * Gets the ETag used for conditional operations; quoted for service
     * versions 2011-08-18 or newer.
     *
     * @return the eTag value.
     */
    public String getETag() {
        return this.eTag;
    }

    /**
     * Sets the ETag value.
     *
     * @param eTag the eTag value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setETag(String eTag) {
        this.eTag = eTag;
        return this;
    }

    /**
     * Gets the MD5 hash returned for full-blob reads so the client can check
     * message content integrity.
     *
     * @return a defensive copy of the contentMd5 value.
     */
    public byte[] getContentMd5() {
        return CoreUtils.clone(this.contentMd5);
    }

    /**
     * Sets the full-blob MD5 hash.
     *
     * @param contentMd5 the contentMd5 value to set; copied defensively.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setContentMd5(byte[] contentMd5) {
        this.contentMd5 = CoreUtils.clone(contentMd5);
        return this;
    }

    /**
     * Gets the echoed Content-Encoding request header value.
     *
     * @return the contentEncoding value.
     */
    public String getContentEncoding() {
        return this.contentEncoding;
    }

    /**
     * Sets the Content-Encoding value.
     *
     * @param contentEncoding the contentEncoding value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setContentEncoding(String contentEncoding) {
        this.contentEncoding = contentEncoding;
        return this;
    }

    /**
     * Gets the Cache-Control value previously specified for the blob.
     *
     * @return the cacheControl value.
     */
    public String getCacheControl() {
        return this.cacheControl;
    }

    /**
     * Sets the Cache-Control value.
     *
     * @param cacheControl the cacheControl value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setCacheControl(String cacheControl) {
        this.cacheControl = cacheControl;
        return this;
    }

    /**
     * Gets the echoed 'x-ms-blob-content-disposition' value, which conveys how
     * to process the response payload (e.g. 'attachment' to show a Save As
     * dialog instead of displaying the response).
     *
     * @return the contentDisposition value.
     */
    public String getContentDisposition() {
        return this.contentDisposition;
    }

    /**
     * Sets the Content-Disposition value.
     *
     * @param contentDisposition the contentDisposition value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setContentDisposition(String contentDisposition) {
        this.contentDisposition = contentDisposition;
        return this;
    }

    /**
     * Gets the echoed Content-Language request header value.
     *
     * @return the contentLanguage value.
     */
    public String getContentLanguage() {
        return this.contentLanguage;
    }

    /**
     * Sets the Content-Language value.
     *
     * @param contentLanguage the contentLanguage value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setContentLanguage(String contentLanguage) {
        this.contentLanguage = contentLanguage;
        return this;
    }

    /**
     * Gets the current sequence number for a page blob; not returned for block
     * or append blobs.
     *
     * @return the blobSequenceNumber value.
     */
    public Long getBlobSequenceNumber() {
        return this.blobSequenceNumber;
    }

    /**
     * Sets the page blob sequence number.
     *
     * @param blobSequenceNumber the blobSequenceNumber value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setBlobSequenceNumber(Long blobSequenceNumber) {
        this.blobSequenceNumber = blobSequenceNumber;
        return this;
    }

    /**
     * Gets the blob's type: 'BlockBlob', 'PageBlob' or 'AppendBlob'.
     *
     * @return the blobType value.
     */
    public BlobType getBlobType() {
        return this.blobType;
    }

    /**
     * Sets the blob's type.
     *
     * @param blobType the blobType value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setBlobType(BlobType blobType) {
        this.blobType = blobType;
        return this;
    }

    /**
     * Gets the conclusion time of the last attempted Copy Blob operation where
     * this blob was the destination (completed, aborted or failed). Absent
     * while a copy is pending, if this blob was never a copy destination, or
     * if it was modified after a concluded copy via Set Blob Properties, Put
     * Blob, or Put Block List.
     *
     * @return the copyCompletionTime value.
     */
    public OffsetDateTime getCopyCompletionTime() {
        return this.copyCompletionTime;
    }

    /**
     * Sets the copy completion time.
     *
     * @param copyCompletionTime the copyCompletionTime value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setCopyCompletionTime(OffsetDateTime copyCompletionTime) {
        this.copyCompletionTime = copyCompletionTime;
        return this;
    }

    /**
     * Gets the description of the last fatal or non-fatal copy failure; only
     * appears when x-ms-copy-status is failed or pending.
     *
     * @return the copyStatusDescription value.
     */
    public String getCopyStatusDescription() {
        return this.copyStatusDescription;
    }

    /**
     * Sets the copy status description.
     *
     * @param copyStatusDescription the copyStatusDescription value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setCopyStatusDescription(String copyStatusDescription) {
        this.copyStatusDescription = copyStatusDescription;
        return this;
    }

    /**
     * Gets the identifier of this copy operation; usable with Get Blob
     * Properties to check its status, or with Abort Copy Blob to abort a
     * pending copy.
     *
     * @return the copyId value.
     */
    public String getCopyId() {
        return this.copyId;
    }

    /**
     * Sets the copy operation identifier.
     *
     * @param copyId the copyId value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setCopyId(String copyId) {
        this.copyId = copyId;
        return this;
    }

    /**
     * Gets the bytes copied / total source bytes of the last attempted Copy
     * Blob operation where this blob was the destination.
     *
     * @return the copyProgress value.
     */
    public String getCopyProgress() {
        return this.copyProgress;
    }

    /**
     * Sets the copy progress.
     *
     * @param copyProgress the copyProgress value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setCopyProgress(String copyProgress) {
        this.copyProgress = copyProgress;
        return this;
    }

    /**
     * Gets the URL (up to 2 KB) of the source blob or file used in the last
     * attempted Copy Blob operation where this blob was the destination.
     *
     * @return the copySource value.
     */
    public String getCopySource() {
        return this.copySource;
    }

    /**
     * Sets the copy source URL.
     *
     * @param copySource the copySource value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setCopySource(String copySource) {
        this.copySource = copySource;
        return this;
    }

    /**
     * Gets the state of the copy identified by x-ms-copy-id: 'pending',
     * 'success', 'aborted' or 'failed'.
     *
     * @return the copyStatus value.
     */
    public CopyStatusType getCopyStatus() {
        return this.copyStatus;
    }

    /**
     * Sets the copy status.
     *
     * @param copyStatus the copyStatus value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setCopyStatus(CopyStatusType copyStatus) {
        this.copyStatus = copyStatus;
        return this;
    }

    /**
     * Gets whether an active lease is of 'infinite' or 'fixed' duration.
     *
     * @return the leaseDuration value.
     */
    public LeaseDurationType getLeaseDuration() {
        return this.leaseDuration;
    }

    /**
     * Sets the lease duration type.
     *
     * @param leaseDuration the leaseDuration value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setLeaseDuration(LeaseDurationType leaseDuration) {
        this.leaseDuration = leaseDuration;
        return this;
    }

    /**
     * Gets the lease state of the blob: 'available', 'leased', 'expired',
     * 'breaking' or 'broken'.
     *
     * @return the leaseState value.
     */
    public LeaseStateType getLeaseState() {
        return this.leaseState;
    }

    /**
     * Sets the lease state.
     *
     * @param leaseState the leaseState value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setLeaseState(LeaseStateType leaseState) {
        this.leaseState = leaseState;
        return this;
    }

    /**
     * Gets the current lease status of the blob: 'locked' or 'unlocked'.
     *
     * @return the leaseStatus value.
     */
    public LeaseStatusType getLeaseStatus() {
        return this.leaseStatus;
    }

    /**
     * Sets the lease status.
     *
     * @param leaseStatus the leaseStatus value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setLeaseStatus(LeaseStatusType leaseStatus) {
        this.leaseStatus = leaseStatus;
        return this;
    }

    /**
     * Gets the echoed client request id sent in the request, if any.
     *
     * @return the clientRequestId value.
     */
    public String getClientRequestId() {
        return this.clientRequestId;
    }

    /**
     * Sets the client request id.
     *
     * @param clientRequestId the clientRequestId value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setClientRequestId(String clientRequestId) {
        this.clientRequestId = clientRequestId;
        return this;
    }

    /**
     * Gets the id uniquely identifying the request; useful for
     * troubleshooting.
     *
     * @return the requestId value.
     */
    public String getRequestId() {
        return this.requestId;
    }

    /**
     * Sets the request id.
     *
     * @param requestId the requestId value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setRequestId(String requestId) {
        this.requestId = requestId;
        return this;
    }

    /**
     * Gets the Blob service version used to execute the request; returned for
     * requests made against version 2009-09-19 and above.
     *
     * @return the version value.
     */
    public String getVersion() {
        return this.version;
    }

    /**
     * Sets the service version.
     *
     * @param version the version value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setVersion(String version) {
        this.version = version;
        return this;
    }

    /**
     * Gets the DateTime value uniquely identifying this blob version; may be
     * used in subsequent requests to access that version.
     *
     * @return the versionId value.
     */
    public String getVersionId() {
        return this.versionId;
    }

    /**
     * Sets the version id.
     *
     * @param versionId the versionId value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setVersionId(String versionId) {
        this.versionId = versionId;
        return this;
    }

    /**
     * Gets the Accept-Ranges value indicating the service supports requests
     * for partial blob content.
     *
     * @return the acceptRanges value.
     */
    public String getAcceptRanges() {
        return this.acceptRanges;
    }

    /**
     * Sets the Accept-Ranges value.
     *
     * @param acceptRanges the acceptRanges value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setAcceptRanges(String acceptRanges) {
        this.acceptRanges = acceptRanges;
        return this;
    }

    /**
     * Gets the UTC date/time at which the response was initiated.
     *
     * @return the dateProperty value.
     */
    public OffsetDateTime getDateProperty() {
        return this.dateProperty;
    }

    /**
     * Sets the response date.
     *
     * @param dateProperty the dateProperty value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setDateProperty(OffsetDateTime dateProperty) {
        this.dateProperty = dateProperty;
        return this;
    }

    /**
     * Gets the number of committed blocks present in the blob; returned only
     * for append blobs.
     *
     * @return the blobCommittedBlockCount value.
     */
    public Integer getBlobCommittedBlockCount() {
        return this.blobCommittedBlockCount;
    }

    /**
     * Sets the committed block count.
     *
     * @param blobCommittedBlockCount the blobCommittedBlockCount value to set.
     * @return the BlobDownloadHeaders object itself.
     */
    public BlobDownloadHeaders setBlobCommittedBlockCount(Integer blobCommittedBlockCount) {
        this.blobCommittedBlockCount = blobCommittedBlockCount;
        return this;
    }

    /**
     * Gets whether blob data and application metadata are completely encrypted
     * server-side with the specified algorithm; false when the blob is
     * unencrypted or only partially encrypted.
     *
     * @return the isServerEncrypted value.
*/ public Boolean isServerEncrypted() { return this.isServerEncrypted; } /** * Set the isServerEncrypted property: The value of this header is set to * true if the blob data and application metadata are completely encrypted * using the specified algorithm. Otherwise, the value is set to false * (when the blob is unencrypted, or if only parts of the blob/application * metadata are encrypted). * * @param isServerEncrypted the isServerEncrypted value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setIsServerEncrypted(Boolean isServerEncrypted) { this.isServerEncrypted = isServerEncrypted; return this; } /** * Get the encryptionKeySha256 property: The SHA-256 hash of the encryption * key used to encrypt the blob. This header is only returned when the blob * was encrypted with a customer-provided key. * * @return the encryptionKeySha256 value. */ public String getEncryptionKeySha256() { return this.encryptionKeySha256; } /** * Set the encryptionKeySha256 property: The SHA-256 hash of the encryption * key used to encrypt the blob. This header is only returned when the blob * was encrypted with a customer-provided key. * * @param encryptionKeySha256 the encryptionKeySha256 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionKeySha256(String encryptionKeySha256) { this.encryptionKeySha256 = encryptionKeySha256; return this; } /** * Get the encryptionScope property: Returns the name of the encryption * scope used to encrypt the blob contents and application metadata. Note * that the absence of this header implies use of the default account * encryption scope. * * @return the encryptionScope value. */ public String getEncryptionScope() { return this.encryptionScope; } /** * Set the encryptionScope property: Returns the name of the encryption * scope used to encrypt the blob contents and application metadata. 
Note * that the absence of this header implies use of the default account * encryption scope. * * @param encryptionScope the encryptionScope value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setEncryptionScope(String encryptionScope) { this.encryptionScope = encryptionScope; return this; } /** * Get the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @return the blobContentMD5 value. */ public byte[] getBlobContentMD5() { return CoreUtils.clone(this.blobContentMD5); } /** * Set the blobContentMD5 property: If the blob has a MD5 hash, and if * request contains range header (Range or x-ms-range), this response * header is returned with the value of the whole blob's MD5 value. This * value may or may not be equal to the value returned in Content-MD5 * header, with the latter calculated from the requested range. * * @param blobContentMD5 the blobContentMD5 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setBlobContentMD5(byte[] blobContentMD5) { this.blobContentMD5 = CoreUtils.clone(blobContentMD5); return this; } /** * Get the tagCount property: The number of tags associated with the blob. * * @return the tagCount value. */ public Long getTagCount() { return this.tagCount; } /** * Set the tagCount property: The number of tags associated with the blob. * * @param tagCount the tagCount value to set. * @return the BlobDownloadHeaders object itself. 
*/ public BlobDownloadHeaders setTagCount(Long tagCount) { this.tagCount = tagCount; return this; } /** * Get the contentCrc64 property: If the request is to read a specified * range and the x-ms-range-get-content-crc64 is set to true, then the * request returns a crc64 for the range, as long as the range size is less * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp; * x-ms-range-get-content-md5 is specified in the same request, it will * fail with 400(Bad Request). * * @return the contentCrc64 value. */ public byte[] getContentCrc64() { return CoreUtils.clone(this.contentCrc64); } /** * Set the contentCrc64 property: If the request is to read a specified * range and the x-ms-range-get-content-crc64 is set to true, then the * request returns a crc64 for the range, as long as the range size is less * than or equal to 4 MB. If both x-ms-range-get-content-crc64 &amp; * x-ms-range-get-content-md5 is specified in the same request, it will * fail with 400(Bad Request). * * @param contentCrc64 the contentCrc64 value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setContentCrc64(byte[] contentCrc64) { this.contentCrc64 = CoreUtils.clone(contentCrc64); return this; } /** * Get the errorCode property: The errorCode property. * * @return the errorCode value. */ public String getErrorCode() { return this.errorCode; } /** * Set the errorCode property: The errorCode property. * * @param errorCode the errorCode value to set. * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders setErrorCode(String errorCode) { this.errorCode = errorCode; return this; } }
Same comment here
public PolygonGeometry(List<LineGeometry> rings, GeometryBoundingBox boundingBox, Map<String, Object> properties) { super(boundingBox, properties); Objects.requireNonNull(rings, "'rings' cannot be null."); this.rings = Collections.unmodifiableList(new ArrayList<>(rings)); }
this.rings = Collections.unmodifiableList(new ArrayList<>(rings));
public PolygonGeometry(List<LineGeometry> rings, GeometryBoundingBox boundingBox, Map<String, Object> properties) { super(boundingBox, properties); Objects.requireNonNull(rings, "'rings' cannot be null."); this.rings = Collections.unmodifiableList(new ArrayList<>(rings)); }
class PolygonGeometry extends Geometry { private final List<LineGeometry> rings; /** * Constructs a geometric polygon. * * @param rings The lines that define the polygon. * @throws NullPointerException If {@code rings} is {@code null}. */ public PolygonGeometry(List<LineGeometry> rings) { this(rings, null, null); } /** * Constructs a geometric polygon. * * @param rings The lines that define the polygon. * @param boundingBox Bounding box for the polygon. * @param properties Additional properties of the polygon. * @throws NullPointerException If {@code rings} is {@code null}. */ /** * Unmodifiable representation of the {@link LineGeometry geometric lines} representing this polygon. * * @return An unmodifiable representation of the {@link LineGeometry geometric lines} representing this polygon. */ public List<LineGeometry> getRings() { return rings; } }
class PolygonGeometry extends Geometry { private final List<LineGeometry> rings; /** * Constructs a geometric polygon. * * @param rings The lines that define the polygon. * @throws NullPointerException If {@code rings} is {@code null}. */ public PolygonGeometry(List<LineGeometry> rings) { this(rings, null, null); } /** * Constructs a geometric polygon. * * @param rings The lines that define the polygon. * @param boundingBox Bounding box for the polygon. * @param properties Additional properties of the polygon. * @throws NullPointerException If {@code rings} is {@code null}. */ /** * Unmodifiable representation of the {@link LineGeometry geometric lines} representing this polygon. * * @return An unmodifiable representation of the {@link LineGeometry geometric lines} representing this polygon. */ public List<LineGeometry> getRings() { return rings; } @Override public int hashCode() { return Objects.hash(rings, super.hashCode()); } @Override public boolean equals(Object obj) { if (!(obj instanceof PolygonGeometry)) { return false; } if (this == obj) { return true; } PolygonGeometry other = (PolygonGeometry) obj; return super.equals(obj) && Objects.equals(rings, other.rings); } }
Probably not going to have that for this PR, but we should consider this since the copy will be done for every message receive.
public Mono<Boolean> tryAdd(final T object) { if (object == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("object cannot be null")); } ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); return objectSerializer.serialize(outputStream, object).map(s -> tryAdd(new EventData(s.toByteArray()))); }
return objectSerializer.serialize(outputStream, object).map(s -> tryAdd(new EventData(s.toByteArray())));
public Mono<Boolean> tryAdd(final T object) { if (object == null) { return monoError(logger, new IllegalArgumentException("object cannot be null")); } ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); return serializer.serialize(outputStream, object).map(s -> tryAdd(new EventData(s.toByteArray()))); }
class ObjectBatch<T> extends EventDataBatchBase { private final ClientLogger logger = new ClientLogger(ObjectBatch.class); private final Class<T> batchType; private final ObjectSerializer objectSerializer; ObjectBatch(int maxMessageSize, String partitionId, String partitionKey, Class<T> batchType, ErrorContextProvider contextProvider, TracerProvider tracerProvider, ObjectSerializer objectSerializer, String entityPath, String hostname) { super(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname); this.batchType = Objects.requireNonNull(batchType, "'batchType' cannot be null."); this.objectSerializer = Objects.requireNonNull(objectSerializer, "'objectSerializer' cannot be null."); } /** * Tries to asynchronously serialize an object into an EventData payload and add the EventData to the batch. * * @param object The object to add to the batch. * @return {@code true} if the object could be added to the batch; {@code false} if the serialized * object was too large to fit in the batch. * @throws IllegalArgumentException if object is {@code null}. * @throws AmqpException if serialized object as {@link EventData} is larger than the maximum size * of the {@link EventDataBatch}. */ }
class ObjectBatch<T> extends EventDataBatchBase { private final ClientLogger logger = new ClientLogger(ObjectBatch.class); private final Class<T> batchType; private final ObjectSerializer serializer; ObjectBatch(int maxMessageSize, String partitionId, String partitionKey, Class<T> batchType, ErrorContextProvider contextProvider, TracerProvider tracerProvider, ObjectSerializer serializer, String entityPath, String hostname) { super(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname); this.batchType = Objects.requireNonNull(batchType, "'batchType' cannot be null."); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); } /** * Tries to asynchronously serialize an object into an EventData payload and add the EventData to the batch. * * @param object The object to add to the batch. * @return {@code true} if the object could be added to the batch; {@code false} if the serialized * object was too large to fit in the batch. * @throws IllegalArgumentException if object is {@code null}. * @throws AmqpException if serialized object as {@link EventData} is larger than the maximum size * of the {@link EventDataBatch}. */ }
Same response
public PolygonGeometry(List<LineGeometry> rings, GeometryBoundingBox boundingBox, Map<String, Object> properties) { super(boundingBox, properties); Objects.requireNonNull(rings, "'rings' cannot be null."); this.rings = Collections.unmodifiableList(new ArrayList<>(rings)); }
this.rings = Collections.unmodifiableList(new ArrayList<>(rings));
public PolygonGeometry(List<LineGeometry> rings, GeometryBoundingBox boundingBox, Map<String, Object> properties) { super(boundingBox, properties); Objects.requireNonNull(rings, "'rings' cannot be null."); this.rings = Collections.unmodifiableList(new ArrayList<>(rings)); }
class PolygonGeometry extends Geometry { private final List<LineGeometry> rings; /** * Constructs a geometric polygon. * * @param rings The lines that define the polygon. * @throws NullPointerException If {@code rings} is {@code null}. */ public PolygonGeometry(List<LineGeometry> rings) { this(rings, null, null); } /** * Constructs a geometric polygon. * * @param rings The lines that define the polygon. * @param boundingBox Bounding box for the polygon. * @param properties Additional properties of the polygon. * @throws NullPointerException If {@code rings} is {@code null}. */ /** * Unmodifiable representation of the {@link LineGeometry geometric lines} representing this polygon. * * @return An unmodifiable representation of the {@link LineGeometry geometric lines} representing this polygon. */ public List<LineGeometry> getRings() { return rings; } }
class PolygonGeometry extends Geometry { private final List<LineGeometry> rings; /** * Constructs a geometric polygon. * * @param rings The lines that define the polygon. * @throws NullPointerException If {@code rings} is {@code null}. */ public PolygonGeometry(List<LineGeometry> rings) { this(rings, null, null); } /** * Constructs a geometric polygon. * * @param rings The lines that define the polygon. * @param boundingBox Bounding box for the polygon. * @param properties Additional properties of the polygon. * @throws NullPointerException If {@code rings} is {@code null}. */ /** * Unmodifiable representation of the {@link LineGeometry geometric lines} representing this polygon. * * @return An unmodifiable representation of the {@link LineGeometry geometric lines} representing this polygon. */ public List<LineGeometry> getRings() { return rings; } @Override public int hashCode() { return Objects.hash(rings, super.hashCode()); } @Override public boolean equals(Object obj) { if (!(obj instanceof PolygonGeometry)) { return false; } if (this == obj) { return true; } PolygonGeometry other = (PolygonGeometry) obj; return super.equals(obj) && Objects.equals(rings, other.rings); } }
Should this be using `monoError`?
public Mono<Boolean> tryAdd(final T object) { if (object == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("object cannot be null")); } ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); return objectSerializer.serialize(outputStream, object).map(s -> tryAdd(new EventData(s.toByteArray()))); }
throw logger.logExceptionAsWarning(new IllegalArgumentException("object cannot be null"));
public Mono<Boolean> tryAdd(final T object) { if (object == null) { return monoError(logger, new IllegalArgumentException("object cannot be null")); } ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); return serializer.serialize(outputStream, object).map(s -> tryAdd(new EventData(s.toByteArray()))); }
class ObjectBatch<T> extends EventDataBatchBase { private final ClientLogger logger = new ClientLogger(ObjectBatch.class); private final Class<T> batchType; private final ObjectSerializer objectSerializer; ObjectBatch(int maxMessageSize, String partitionId, String partitionKey, Class<T> batchType, ErrorContextProvider contextProvider, TracerProvider tracerProvider, ObjectSerializer objectSerializer, String entityPath, String hostname) { super(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname); this.batchType = Objects.requireNonNull(batchType, "'batchType' cannot be null."); this.objectSerializer = Objects.requireNonNull(objectSerializer, "'objectSerializer' cannot be null."); } /** * Tries to asynchronously serialize an object into an EventData payload and add the EventData to the batch. * * @param object The object to add to the batch. * @return {@code true} if the object could be added to the batch; {@code false} if the serialized * object was too large to fit in the batch. * @throws IllegalArgumentException if object is {@code null}. * @throws AmqpException if serialized object as {@link EventData} is larger than the maximum size * of the {@link EventDataBatch}. */ }
class ObjectBatch<T> extends EventDataBatchBase { private final ClientLogger logger = new ClientLogger(ObjectBatch.class); private final Class<T> batchType; private final ObjectSerializer serializer; ObjectBatch(int maxMessageSize, String partitionId, String partitionKey, Class<T> batchType, ErrorContextProvider contextProvider, TracerProvider tracerProvider, ObjectSerializer serializer, String entityPath, String hostname) { super(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname); this.batchType = Objects.requireNonNull(batchType, "'batchType' cannot be null."); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); } /** * Tries to asynchronously serialize an object into an EventData payload and add the EventData to the batch. * * @param object The object to add to the batch. * @return {@code true} if the object could be added to the batch; {@code false} if the serialized * object was too large to fit in the batch. * @throws IllegalArgumentException if object is {@code null}. * @throws AmqpException if serialized object as {@link EventData} is larger than the maximum size * of the {@link EventDataBatch}. */ }
Given that this is internal a cool optimization would be creating a private extension of `ByteArrayOuputStream` to access its backing `byte[]` directly. `toByteArray` will make a clone of the `byte[]`.
public Mono<Boolean> tryAdd(final T object) { if (object == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("object cannot be null")); } ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); return objectSerializer.serialize(outputStream, object).map(s -> tryAdd(new EventData(s.toByteArray()))); }
return objectSerializer.serialize(outputStream, object).map(s -> tryAdd(new EventData(s.toByteArray())));
public Mono<Boolean> tryAdd(final T object) { if (object == null) { return monoError(logger, new IllegalArgumentException("object cannot be null")); } ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); return serializer.serialize(outputStream, object).map(s -> tryAdd(new EventData(s.toByteArray()))); }
class ObjectBatch<T> extends EventDataBatchBase { private final ClientLogger logger = new ClientLogger(ObjectBatch.class); private final Class<T> batchType; private final ObjectSerializer objectSerializer; ObjectBatch(int maxMessageSize, String partitionId, String partitionKey, Class<T> batchType, ErrorContextProvider contextProvider, TracerProvider tracerProvider, ObjectSerializer objectSerializer, String entityPath, String hostname) { super(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname); this.batchType = Objects.requireNonNull(batchType, "'batchType' cannot be null."); this.objectSerializer = Objects.requireNonNull(objectSerializer, "'objectSerializer' cannot be null."); } /** * Tries to asynchronously serialize an object into an EventData payload and add the EventData to the batch. * * @param object The object to add to the batch. * @return {@code true} if the object could be added to the batch; {@code false} if the serialized * object was too large to fit in the batch. * @throws IllegalArgumentException if object is {@code null}. * @throws AmqpException if serialized object as {@link EventData} is larger than the maximum size * of the {@link EventDataBatch}. */ }
class ObjectBatch<T> extends EventDataBatchBase { private final ClientLogger logger = new ClientLogger(ObjectBatch.class); private final Class<T> batchType; private final ObjectSerializer serializer; ObjectBatch(int maxMessageSize, String partitionId, String partitionKey, Class<T> batchType, ErrorContextProvider contextProvider, TracerProvider tracerProvider, ObjectSerializer serializer, String entityPath, String hostname) { super(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname); this.batchType = Objects.requireNonNull(batchType, "'batchType' cannot be null."); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); } /** * Tries to asynchronously serialize an object into an EventData payload and add the EventData to the batch. * * @param object The object to add to the batch. * @return {@code true} if the object could be added to the batch; {@code false} if the serialized * object was too large to fit in the batch. * @throws IllegalArgumentException if object is {@code null}. * @throws AmqpException if serialized object as {@link EventData} is larger than the maximum size * of the {@link EventDataBatch}. */ }
You should use `entityType` instead of topicName for your comparison
public ServiceBusSenderAsyncClient buildAsyncClient() { final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); if (!CoreUtils.isNullOrEmpty(viaQueueName) && !CoreUtils.isNullOrEmpty(topicName)) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "(%s), Via queue feature work only with a queue.", viaQueueName))); } final String entityName; switch (entityType) { case QUEUE: entityName = queueName; break; case SUBSCRIPTION: entityName = topicName; break; case UNKNOWN: entityName = connectionStringEntityName; break; default: throw logger.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } return new ServiceBusSenderAsyncClient(entityName, viaQueueName, entityType, connectionProcessor, retryOptions, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); }
if (!CoreUtils.isNullOrEmpty(viaQueueName) && !CoreUtils.isNullOrEmpty(topicName)) {
public ServiceBusSenderAsyncClient buildAsyncClient() { final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); if (!CoreUtils.isNullOrEmpty(viaQueueName) && entityType == MessagingEntityType.SUBSCRIPTION) { throw logger.logExceptionAsError(new IllegalStateException(String.format( "(%s), Via queue feature work only with a queue.", viaQueueName))); } final String entityName; switch (entityType) { case QUEUE: entityName = queueName; break; case SUBSCRIPTION: entityName = topicName; break; case UNKNOWN: entityName = connectionStringEntityName; break; default: throw logger.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, viaQueueName); }
class ServiceBusSenderClientBuilder { private String queueName; private String topicName; private String viaQueueName; private ServiceBusSenderClientBuilder() { } /** * Sets the name of the Service Bus queue to publish messages to. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the name of the initial destination Service Bus queue to publish messages to. * * @param viaQueueName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * * @see <a href="https: */ public ServiceBusSenderClientBuilder viaQueueName(String viaQueueName) { this.viaQueueName = viaQueueName; return this; } /** * Sets the name of the Service Bus topic to publish messages to. * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link * ServiceBusMessage} to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ /** * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage} * to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ public ServiceBusSenderClient buildClient() { return new ServiceBusSenderClient(buildAsyncClient(), retryOptions.getTryTimeout()); } }
class ServiceBusSenderClientBuilder { private String queueName; private String topicName; private String viaQueueName; private ServiceBusSenderClientBuilder() { } /** * Sets the name of the Service Bus queue to publish messages to. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the name of the initial destination Service Bus queue to publish messages to. * * @param viaQueueName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * * @see <a href="https: */ public ServiceBusSenderClientBuilder viaQueueName(String viaQueueName) { this.viaQueueName = viaQueueName; return this; } /** * Sets the name of the Service Bus topic to publish messages to. * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link * ServiceBusMessage} to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ /** * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage} * to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ public ServiceBusSenderClient buildClient() { return new ServiceBusSenderClient(buildAsyncClient(), retryOptions.getTryTimeout()); } }
Also add documentation for this runtime error. It should be an IllegalStateException.
public ServiceBusSenderAsyncClient buildAsyncClient() { final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); if (!CoreUtils.isNullOrEmpty(viaQueueName) && !CoreUtils.isNullOrEmpty(topicName)) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "(%s), Via queue feature work only with a queue.", viaQueueName))); } final String entityName; switch (entityType) { case QUEUE: entityName = queueName; break; case SUBSCRIPTION: entityName = topicName; break; case UNKNOWN: entityName = connectionStringEntityName; break; default: throw logger.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } return new ServiceBusSenderAsyncClient(entityName, viaQueueName, entityType, connectionProcessor, retryOptions, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); }
if (!CoreUtils.isNullOrEmpty(viaQueueName) && !CoreUtils.isNullOrEmpty(topicName)) {
public ServiceBusSenderAsyncClient buildAsyncClient() { final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); if (!CoreUtils.isNullOrEmpty(viaQueueName) && entityType == MessagingEntityType.SUBSCRIPTION) { throw logger.logExceptionAsError(new IllegalStateException(String.format( "(%s), Via queue feature work only with a queue.", viaQueueName))); } final String entityName; switch (entityType) { case QUEUE: entityName = queueName; break; case SUBSCRIPTION: entityName = topicName; break; case UNKNOWN: entityName = connectionStringEntityName; break; default: throw logger.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, viaQueueName); }
class ServiceBusSenderClientBuilder { private String queueName; private String topicName; private String viaQueueName; private ServiceBusSenderClientBuilder() { } /** * Sets the name of the Service Bus queue to publish messages to. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the name of the initial destination Service Bus queue to publish messages to. * * @param viaQueueName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * * @see <a href="https: */ public ServiceBusSenderClientBuilder viaQueueName(String viaQueueName) { this.viaQueueName = viaQueueName; return this; } /** * Sets the name of the Service Bus topic to publish messages to. * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link * ServiceBusMessage} to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ /** * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage} * to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ public ServiceBusSenderClient buildClient() { return new ServiceBusSenderClient(buildAsyncClient(), retryOptions.getTryTimeout()); } }
class ServiceBusSenderClientBuilder { private String queueName; private String topicName; private String viaQueueName; private ServiceBusSenderClientBuilder() { } /** * Sets the name of the Service Bus queue to publish messages to. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the name of the initial destination Service Bus queue to publish messages to. * * @param viaQueueName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * * @see <a href="https: */ public ServiceBusSenderClientBuilder viaQueueName(String viaQueueName) { this.viaQueueName = viaQueueName; return this; } /** * Sets the name of the Service Bus topic to publish messages to. * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link * ServiceBusMessage} to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ /** * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage} * to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ public ServiceBusSenderClient buildClient() { return new ServiceBusSenderClient(buildAsyncClient(), retryOptions.getTryTimeout()); } }
Do we need to do a string format? You can append it.
private Mono<AmqpSendLink> getSendLink() { return connectionProcessor .flatMap(connection -> { String linkName = entityName; if (!CoreUtils.isNullOrEmpty(viaEntityName)) { linkName = String.format("VIA-%s", viaEntityName); } return connection.createSendLink(linkName, entityName, viaEntityName, retryOptions); }) .doOnNext(next -> linkName.compareAndSet(null, next.getLinkName())); }
linkName = String.format("VIA-%s", viaEntityName);
private Mono<AmqpSendLink> getSendLink() { return connectionProcessor .flatMap(connection -> { if (!CoreUtils.isNullOrEmpty(viaEntityName)) { return connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName, retryOptions, entityName); } else { return connection.createSendLink(entityName, entityName, retryOptions, null); } }) .doOnNext(next -> linkName.compareAndSet(null, next.getLinkName())); }
class ServiceBusSenderAsyncClient implements AutoCloseable { /** * The default maximum allowable size, in bytes, for a batch to be sent. */ static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final String TRANSACTION_LINK_NAME = "coordinator"; private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(ServiceBusSenderAsyncClient.class); private final AtomicReference<String> linkName = new AtomicReference<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final MessagingEntityType entityType; private final Runnable onClientClose; private final String entityName; private final ServiceBusConnectionProcessor connectionProcessor; private final String viaEntityName; /** * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity. 
*/ ServiceBusSenderAsyncClient(String entityName, String viaEntityName, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = tracerProvider; this.retryPolicy = getRetryPolicy(retryOptions); this.entityType = entityType; this.viaEntityName = viaEntityName; this.onClientClose = onClientClose; } /** * Gets the fully qualified namespace. * * @return The fully qualified namespace. */ public String getFullyQualifiedNamespace() { return connectionProcessor.getFullyQualifiedNamespace(); } /** * Gets the name of the Service Bus resource. * * @return The name of the Service Bus resource. */ public String getEntityPath() { return entityName; } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * * @return The {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code message} is {@code null}. */ public Mono<Void> send(ServiceBusMessage message) { if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } return sendInternal(Flux.just(message), null); } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. 
* * @return The {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code message} is {@code null}. */ public Mono<Void> send(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(Flux.just(message), transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages * exceed the maximum size of a single batch, an exception will be triggered and the send will fail. * By default, the message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. * @throws AmqpException if {@code messages} is larger than the maximum allowed size of a single batch. */ public Mono<Void> send(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendIterable(messages, transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. 
If the size of messages exceed * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the * message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code messages} is {@code null}. * @throws AmqpException if {@code messages} is larger than the maximum allowed size of a single batch. */ public Mono<Void> send(Iterable<ServiceBusMessage> messages) { return sendIterable(messages, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch} is {@code null}. */ public Mono<Void> send(ServiceBusMessageBatch batch) { return sendInternal(batch, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. 
*/ public Mono<Void> send(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(batch, transactionContext); } /** * Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. * * @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. */ public Mono<ServiceBusMessageBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link ServiceBusMessageBatch} configured with the options specified. * * @param options A set of options used to configure the {@link ServiceBusMessageBatch}. * * @return A new {@link ServiceBusMessageBatch} configured with the given options. * @throws NullPointerException if {@code options} is null. */ public Mono<ServiceBusMessageBatch> createBatch(CreateBatchOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final int maxSize = options.getMaximumSizeInBytes(); return getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (maxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size" + " (%s bytes).", maxSize, maximumLinkSize))); } final int batchSize = maxSize > 0 ? 
maxSize : maximumLinkSize; return Mono.just( new ServiceBusMessageBatch(batchSize, link::getErrorContext, tracerProvider, messageSerializer)); })); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. * @param scheduledEnqueueTime Instant at which the message should appear in the Service Bus queue or topic. * @param transactionContext to be set on message before sending to Service Bus. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. * * @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, Instant scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. * @param scheduledEnqueueTime Instant at which the message should appear in the Service Bus queue or topic. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. 
* * @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, Instant scheduledEnqueueTime) { return scheduleMessageInternal(message, scheduledEnqueueTime, null); } /** * Cancels the enqueuing of an already scheduled message, if it was not already enqueued. * * @param sequenceNumber of the scheduled message to cancel. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws IllegalArgumentException if {@code sequenceNumber} is negative. */ public Mono<Void> cancelScheduledMessage(long sequenceNumber) { if (sequenceNumber < 0) { return monoError(logger, new IllegalArgumentException("'sequenceNumber' cannot be negative.")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.cancelScheduledMessage(sequenceNumber, linkName.get())); } /** * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed along with * {@link ServiceBusReceivedMessage} or {@link MessageLockToken} to all operations that needs to be in * this transaction. * * @return a new {@link ServiceBusTransactionContext}. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on service bus resource. 
*/ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext to be rollbacked. * * @return The {@link Mono} that finishes this operation on service bus resource. */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the {@link ServiceBusSenderAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } onClientClose.run(); } private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) { if (Objects.isNull(messages)) { return monoError(logger, new NullPointerException("'messages' cannot be null.")); } return createBatch().flatMap(messageBatch -> { messages.forEach(message -> messageBatch.tryAdd(message)); return sendInternal(messageBatch, transaction); }); } private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, Instant scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } if (Objects.isNull(scheduledEnqueueTime)) { return monoError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null.")); } return getSendLink() .flatMap(link -> link.getLinkSize().flatMap(size -> { int maxSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.schedule(message, scheduledEnqueueTime, maxSize, link.getLinkName(), transactionContext)); })); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. */ private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(batch)) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; if (batch.getMessages().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with size[{}].", batch.getCount()); Context sharedContext = null; final List<org.apache.qpid.proton.message.Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getMessages().size(); i++) { final ServiceBusMessage event = batch.getMessages().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(event); final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); message.setMessageAnnotations(messageAnnotations); messages.add(message); } final Context finalSharedContext = sharedContext != null ? sharedContext : Context.NONE; return withRetry( getSendLink().flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } if (transactionContext != null && transactionContext.getTransactionId() != null) { final TransactionalState deliveryState = new TransactionalState(); deliveryState.setTxnId(new Binary(transactionContext.getTransactionId().array())); return messages.size() == 1 ? link.send(messages.get(0), deliveryState) : link.send(messages, deliveryState); } else { return messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages); } }) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }) .doOnError(error -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), Signal.error(error)); } }), retryOptions.getTryTimeout(), retryPolicy); } private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { return getSendLink() .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setMaximumSizeInBytes(batchSize); return messages.collect(new AmqpMessageCollector(batchOptions, 1, link::getErrorContext, tracerProvider, messageSerializer)); }) .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext))); } private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches, ServiceBusTransactionContext transactionContext) { return eventBatches .flatMap(messageBatch -> sendInternal(messageBatch, transactionContext)) .then() .doOnError(error -> logger.error("Error sending batch.", error)); } private static class AmqpMessageCollector implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> { private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final MessageSerializer serializer; private volatile ServiceBusMessageBatch currentBatch; AmqpMessageCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.serializer = serializer; currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer); } @Override public Supplier<List<ServiceBusMessageBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() { return (list, event) -> { ServiceBusMessageBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<ServiceBusMessageBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() { return list -> { ServiceBusMessageBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class ServiceBusSenderAsyncClient implements AutoCloseable { /** * The default maximum allowable size, in bytes, for a batch to be sent. */ static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final String TRANSACTION_LINK_NAME = "coordinator"; private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(ServiceBusSenderAsyncClient.class); private final AtomicReference<String> linkName = new AtomicReference<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final MessagingEntityType entityType; private final Runnable onClientClose; private final String entityName; private final ServiceBusConnectionProcessor connectionProcessor; private final String viaEntityName; /** * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity. 
*/ ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, String viaEntityName) { this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = tracerProvider; this.retryPolicy = getRetryPolicy(retryOptions); this.entityType = entityType; this.viaEntityName = viaEntityName; this.onClientClose = onClientClose; } /** * Gets the fully qualified namespace. * * @return The fully qualified namespace. */ public String getFullyQualifiedNamespace() { return connectionProcessor.getFullyQualifiedNamespace(); } /** * Gets the name of the Service Bus resource. * * @return The name of the Service Bus resource. */ public String getEntityPath() { return entityName; } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * * @return The {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code message} is {@code null}. */ public Mono<Void> sendMessage(ServiceBusMessage message) { if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } return sendInternal(Flux.just(message), null); } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. 
* * @return The {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code message} is {@code null}. */ public Mono<Void> sendMessage(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(Flux.just(message), transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages * exceed the maximum size of a single batch, an exception will be triggered and the send will fail. * By default, the message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. * @throws AmqpException if {@code messages} is larger than the maximum allowed size of a single batch. */ public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendIterable(messages, transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. 
If the size of messages exceed * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the * message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code messages} is {@code null}. * @throws AmqpException if {@code messages} is larger than the maximum allowed size of a single batch. */ public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages) { return sendIterable(messages, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch} is {@code null}. */ public Mono<Void> sendMessages(ServiceBusMessageBatch batch) { return sendInternal(batch, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. 
*/ public Mono<Void> sendMessages(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(batch, transactionContext); } /** * Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. * * @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. */ public Mono<ServiceBusMessageBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link ServiceBusMessageBatch} configured with the options specified. * * @param options A set of options used to configure the {@link ServiceBusMessageBatch}. * * @return A new {@link ServiceBusMessageBatch} configured with the given options. * @throws NullPointerException if {@code options} is null. */ public Mono<ServiceBusMessageBatch> createBatch(CreateBatchOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final int maxSize = options.getMaximumSizeInBytes(); return getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (maxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size" + " (%s bytes).", maxSize, maximumLinkSize))); } final int batchSize = maxSize > 0 ? 
maxSize : maximumLinkSize; return Mono.just( new ServiceBusMessageBatch(batchSize, link::getErrorContext, tracerProvider, messageSerializer)); })); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. * @param scheduledEnqueueTime Instant at which the message should appear in the Service Bus queue or topic. * @param transactionContext to be set on message before sending to Service Bus. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. * * @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, Instant scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. * @param scheduledEnqueueTime Instant at which the message should appear in the Service Bus queue or topic. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. 
* * @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, Instant scheduledEnqueueTime) { return scheduleMessageInternal(message, scheduledEnqueueTime, null); } /** * Cancels the enqueuing of an already scheduled message, if it was not already enqueued. * * @param sequenceNumber of the scheduled message to cancel. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws IllegalArgumentException if {@code sequenceNumber} is negative. */ public Mono<Void> cancelScheduledMessage(long sequenceNumber) { if (sequenceNumber < 0) { return monoError(logger, new IllegalArgumentException("'sequenceNumber' cannot be negative.")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.cancelScheduledMessage(sequenceNumber, linkName.get())); } /** * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed along with * {@link ServiceBusReceivedMessage} or {@code lockToken } to all operations that needs to be in * this transaction. * * @return a new {@link ServiceBusTransactionContext}. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on service bus resource. 
*/ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext to be rollbacked. * * @return The {@link Mono} that finishes this operation on service bus resource. */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the {@link ServiceBusSenderAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } onClientClose.run(); } private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) { if (Objects.isNull(messages)) { return monoError(logger, new NullPointerException("'messages' cannot be null.")); } return createBatch().flatMap(messageBatch -> { messages.forEach(message -> messageBatch.tryAdd(message)); return sendInternal(messageBatch, transaction); }); } private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, Instant scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } if (Objects.isNull(scheduledEnqueueTime)) { return monoError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null.")); } return getSendLink() .flatMap(link -> link.getLinkSize().flatMap(size -> { int maxSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.schedule(message, scheduledEnqueueTime, maxSize, link.getLinkName(), transactionContext)); })); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. */ private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(batch)) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; if (batch.getMessages().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with size[{}].", batch.getCount()); Context sharedContext = null; final List<org.apache.qpid.proton.message.Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getMessages().size(); i++) { final ServiceBusMessage event = batch.getMessages().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(event); final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); message.setMessageAnnotations(messageAnnotations); messages.add(message); } final Context finalSharedContext = sharedContext != null ? sharedContext : Context.NONE; return withRetry( getSendLink().flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } if (transactionContext != null && transactionContext.getTransactionId() != null) { final TransactionalState deliveryState = new TransactionalState(); deliveryState.setTxnId(new Binary(transactionContext.getTransactionId().array())); return messages.size() == 1 ? link.send(messages.get(0), deliveryState) : link.send(messages, deliveryState); } else { return messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages); } }) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }) .doOnError(error -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), Signal.error(error)); } }), retryOptions.getTryTimeout(), retryPolicy); } private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { return getSendLink() .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setMaximumSizeInBytes(batchSize); return messages.collect(new AmqpMessageCollector(batchOptions, 1, link::getErrorContext, tracerProvider, messageSerializer)); }) .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext))); } private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches, ServiceBusTransactionContext transactionContext) { return eventBatches .flatMap(messageBatch -> sendInternal(messageBatch, transactionContext)) .then() .doOnError(error -> logger.error("Error sending batch.", error)); } private static class AmqpMessageCollector implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> { private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final MessageSerializer serializer; private volatile ServiceBusMessageBatch currentBatch; AmqpMessageCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.serializer = serializer; currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer); } @Override public Supplier<List<ServiceBusMessageBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() { return (list, event) -> { ServiceBusMessageBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<ServiceBusMessageBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() { return list -> { ServiceBusMessageBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
This should be scoped to when the build method is actually invoked.
void viaQueueNameWithTopicNotAllowed() { assertThrows(IllegalArgumentException.class, () -> { final ServiceBusClientBuilder builder = new ServiceBusClientBuilder(); builder.connectionString(NAMESPACE_CONNECTION_STRING) .sender() .topicName(TOPIC_NAME) .viaQueueName(VIA_QUEUE_NAME) .buildAsyncClient(); }); }
builder.connectionString(NAMESPACE_CONNECTION_STRING)
void viaQueueNameWithTopicNotAllowed() { ServiceBusSenderClientBuilder builder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .sender() .topicName(TOPIC_NAME) .viaQueueName(VIA_QUEUE_NAME); assertThrows(IllegalStateException.class, () -> builder.buildAsyncClient()); }
class ServiceBusClientBuilderTest { private static final String NAMESPACE_NAME = "dummyNamespaceName"; private static final String DEFAULT_DOMAIN_NAME = "servicebus.windows.net/"; private static final String ENDPOINT_FORMAT = "sb: private static final String QUEUE_NAME = "test-queue-name"; private static final String VIA_QUEUE_NAME = "test-via-queue-name"; private static final String TOPIC_NAME = "test-topic-name"; private static final String SHARED_ACCESS_KEY_NAME = "dummySasKeyName"; private static final String SHARED_ACCESS_KEY = "dummySasKey"; private static final String ENDPOINT = getUri(ENDPOINT_FORMAT, NAMESPACE_NAME, DEFAULT_DOMAIN_NAME).toString(); private static final String PROXY_HOST = "127.0.0.1"; private static final String PROXY_PORT = "3128"; private static final String NAMESPACE_CONNECTION_STRING = String.format("Endpoint=%s;SharedAccessKeyName=%s;SharedAccessKey=%s", ENDPOINT, SHARED_ACCESS_KEY_NAME, SHARED_ACCESS_KEY); private static final String ENTITY_PATH_CONNECTION_STRING = String.format("Endpoint=%s;SharedAccessKeyName=%s;SharedAccessKey=%s;EntityPath=%s", ENDPOINT, SHARED_ACCESS_KEY_NAME, SHARED_ACCESS_KEY, QUEUE_NAME); private static final Proxy PROXY_ADDRESS = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(PROXY_HOST, Integer.parseInt(PROXY_PORT))); @Test @Test void queueClientWithViaQueueName() { final ServiceBusClientBuilder builder = new ServiceBusClientBuilder(); final ServiceBusSenderAsyncClient client = builder.connectionString(NAMESPACE_CONNECTION_STRING) .sender() .queueName(QUEUE_NAME) .viaQueueName(VIA_QUEUE_NAME) .buildAsyncClient(); assertNotNull(client); } @Test void missingConnectionString() { assertThrows(IllegalArgumentException.class, () -> { final ServiceBusClientBuilder builder = new ServiceBusClientBuilder(); builder.sender() .queueName(QUEUE_NAME) .buildAsyncClient(); }); } @Test void defaultProxyConfigurationBuilder() { final ServiceBusClientBuilder builder = new ServiceBusClientBuilder(); final 
ServiceBusSenderAsyncClient client = builder.connectionString(NAMESPACE_CONNECTION_STRING) .sender() .queueName(QUEUE_NAME) .buildAsyncClient(); assertNotNull(client); } @Test void customNoneProxyConfigurationBuilder() { final ProxyOptions proxyConfig = new ProxyOptions(ProxyAuthenticationType.NONE, PROXY_ADDRESS, null, null); final ServiceBusSenderClientBuilder builder = new ServiceBusClientBuilder() .connectionString(ENTITY_PATH_CONNECTION_STRING) .proxyOptions(proxyConfig) .transportType(AmqpTransportType.AMQP_WEB_SOCKETS) .sender(); assertNotNull(builder.buildAsyncClient()); } @Test void throwsWithProxyWhenTransportTypeNotChanged() { assertThrows(IllegalArgumentException.class, () -> { final ProxyOptions proxyConfig = new ProxyOptions(ProxyAuthenticationType.BASIC, PROXY_ADDRESS, null, null); final ServiceBusClientBuilder builder = new ServiceBusClientBuilder() .connectionString(ENTITY_PATH_CONNECTION_STRING) .proxyOptions(proxyConfig); assertNotNull(builder.sender().buildAsyncClient()); }); } private static Stream<Arguments> invalidEntityPathConfigurations() { return Stream.of( Arguments.of(NAMESPACE_CONNECTION_STRING, null, null, null), Arguments.of(NAMESPACE_CONNECTION_STRING, "baz", "bar", "foo"), Arguments.of(ENTITY_PATH_CONNECTION_STRING, "baz", null, null), Arguments.of(ENTITY_PATH_CONNECTION_STRING, null, "bar", "foo")); } /** * Tests different invalid entity path scenarios. 
*/ @ParameterizedTest @MethodSource void invalidEntityPathConfigurations(String connectionString, String topicName, String queueName, String subscriptionName) { final ServiceBusSenderClientBuilder senderBuilder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .sender(); final ServiceBusReceiverClientBuilder receiverBuilder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver(); assertThrows(IllegalStateException.class, senderBuilder::buildAsyncClient); assertThrows(IllegalStateException.class, receiverBuilder::buildAsyncClient); } /** * Throws when topic name is set for receiver, but no subscription name is set. */ @Test void throwsWhenSubscriptionNameNotSet() { final ServiceBusReceiverClientBuilder receiverBuilder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName("baz"); assertThrows(IllegalStateException.class, receiverBuilder::buildAsyncClient); } /** * Throws when the prefetch is less than 1. 
*/ @Test void invalidPrefetch() { final ServiceBusReceiverClientBuilder receiverBuilder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName("baz").subscriptionName("bar") .receiveMode(ReceiveMode.PEEK_LOCK) .prefetchCount(0); assertThrows(IllegalArgumentException.class, receiverBuilder::buildAsyncClient); } @MethodSource("getProxyConfigurations") @ParameterizedTest public void testProxyOptionsConfiguration(String proxyConfiguration, boolean expectedClientCreation) { Configuration configuration = Configuration.getGlobalConfiguration().clone(); configuration = configuration.put(Configuration.PROPERTY_HTTP_PROXY, proxyConfiguration); boolean clientCreated = false; try { ServiceBusReceiverClient syncClient = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .configuration(configuration) .receiver() .topicName("baz").subscriptionName("bar") .receiveMode(ReceiveMode.PEEK_LOCK) .buildClient(); clientCreated = true; } catch (Exception ex) { } Assertions.assertEquals(expectedClientCreation, clientCreated); } private static Stream<Arguments> getProxyConfigurations() { return Stream.of( Arguments.of("http: Arguments.of("localhost:8080", true), Arguments.of("localhost_8080", false), Arguments.of("http: Arguments.of("http: Arguments.of(":8080", false), Arguments.of("http: Arguments.of("sub.example.com:8080", true), Arguments.of("https: Arguments.of("https: ); } private static URI getUri(String endpointFormat, String namespace, String domainName) { try { return new URI(String.format(Locale.US, endpointFormat, namespace, domainName)); } catch (URISyntaxException exception) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid namespace name: %s", namespace), exception); } } }
class ServiceBusClientBuilderTest { private static final String NAMESPACE_NAME = "dummyNamespaceName"; private static final String DEFAULT_DOMAIN_NAME = "servicebus.windows.net/"; private static final String ENDPOINT_FORMAT = "sb: private static final String QUEUE_NAME = "test-queue-name"; private static final String VIA_QUEUE_NAME = "test-via-queue-name"; private static final String TOPIC_NAME = "test-topic-name"; private static final String SHARED_ACCESS_KEY_NAME = "dummySasKeyName"; private static final String SHARED_ACCESS_KEY = "dummySasKey"; private static final String ENDPOINT = getUri(ENDPOINT_FORMAT, NAMESPACE_NAME, DEFAULT_DOMAIN_NAME).toString(); private static final String PROXY_HOST = "127.0.0.1"; private static final String PROXY_PORT = "3128"; private static final String NAMESPACE_CONNECTION_STRING = String.format("Endpoint=%s;SharedAccessKeyName=%s;SharedAccessKey=%s", ENDPOINT, SHARED_ACCESS_KEY_NAME, SHARED_ACCESS_KEY); private static final String ENTITY_PATH_CONNECTION_STRING = String.format("Endpoint=%s;SharedAccessKeyName=%s;SharedAccessKey=%s;EntityPath=%s", ENDPOINT, SHARED_ACCESS_KEY_NAME, SHARED_ACCESS_KEY, QUEUE_NAME); private static final Proxy PROXY_ADDRESS = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(PROXY_HOST, Integer.parseInt(PROXY_PORT))); @Test @Test void queueClientWithViaQueueName() { final ServiceBusSenderClientBuilder builder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .sender() .queueName(QUEUE_NAME) .viaQueueName(VIA_QUEUE_NAME); final ServiceBusSenderAsyncClient client = builder.buildAsyncClient(); assertNotNull(client); } @Test void missingConnectionString() { assertThrows(IllegalArgumentException.class, () -> { final ServiceBusClientBuilder builder = new ServiceBusClientBuilder(); builder.sender() .queueName(QUEUE_NAME) .buildAsyncClient(); }); } @Test void defaultProxyConfigurationBuilder() { final ServiceBusClientBuilder builder = new ServiceBusClientBuilder(); final 
ServiceBusSenderAsyncClient client = builder.connectionString(NAMESPACE_CONNECTION_STRING) .sender() .queueName(QUEUE_NAME) .buildAsyncClient(); assertNotNull(client); } @Test void customNoneProxyConfigurationBuilder() { final ProxyOptions proxyConfig = new ProxyOptions(ProxyAuthenticationType.NONE, PROXY_ADDRESS, null, null); final ServiceBusSenderClientBuilder builder = new ServiceBusClientBuilder() .connectionString(ENTITY_PATH_CONNECTION_STRING) .proxyOptions(proxyConfig) .transportType(AmqpTransportType.AMQP_WEB_SOCKETS) .sender(); assertNotNull(builder.buildAsyncClient()); } @Test void throwsWithProxyWhenTransportTypeNotChanged() { assertThrows(IllegalArgumentException.class, () -> { final ProxyOptions proxyConfig = new ProxyOptions(ProxyAuthenticationType.BASIC, PROXY_ADDRESS, null, null); final ServiceBusClientBuilder builder = new ServiceBusClientBuilder() .connectionString(ENTITY_PATH_CONNECTION_STRING) .proxyOptions(proxyConfig); assertNotNull(builder.sender().buildAsyncClient()); }); } private static Stream<Arguments> invalidEntityPathConfigurations() { return Stream.of( Arguments.of(NAMESPACE_CONNECTION_STRING, null, null, null), Arguments.of(NAMESPACE_CONNECTION_STRING, "baz", "bar", "foo"), Arguments.of(ENTITY_PATH_CONNECTION_STRING, "baz", null, null), Arguments.of(ENTITY_PATH_CONNECTION_STRING, null, "bar", "foo")); } /** * Tests different invalid entity path scenarios. 
*/ @ParameterizedTest @MethodSource void invalidEntityPathConfigurations(String connectionString, String topicName, String queueName, String subscriptionName) { final ServiceBusSenderClientBuilder senderBuilder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .sender(); final ServiceBusReceiverClientBuilder receiverBuilder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver(); assertThrows(IllegalStateException.class, senderBuilder::buildAsyncClient); assertThrows(IllegalStateException.class, receiverBuilder::buildAsyncClient); } /** * Throws when topic name is set for receiver, but no subscription name is set. */ @Test void throwsWhenSubscriptionNameNotSet() { final ServiceBusReceiverClientBuilder receiverBuilder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName("baz"); assertThrows(IllegalStateException.class, receiverBuilder::buildAsyncClient); } /** * Throws when the prefetch is less than 1. 
*/ @Test void invalidPrefetch() { final ServiceBusReceiverClientBuilder receiverBuilder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName("baz").subscriptionName("bar") .receiveMode(ReceiveMode.PEEK_LOCK) .prefetchCount(0); assertThrows(IllegalArgumentException.class, receiverBuilder::buildAsyncClient); } @MethodSource("getProxyConfigurations") @ParameterizedTest public void testProxyOptionsConfiguration(String proxyConfiguration, boolean expectedClientCreation) { Configuration configuration = Configuration.getGlobalConfiguration().clone(); configuration = configuration.put(Configuration.PROPERTY_HTTP_PROXY, proxyConfiguration); boolean clientCreated = false; try { ServiceBusReceiverClient syncClient = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .configuration(configuration) .receiver() .topicName("baz").subscriptionName("bar") .receiveMode(ReceiveMode.PEEK_LOCK) .buildClient(); clientCreated = true; } catch (Exception ex) { } Assertions.assertEquals(expectedClientCreation, clientCreated); } private static Stream<Arguments> getProxyConfigurations() { return Stream.of( Arguments.of("http: Arguments.of("localhost:8080", true), Arguments.of("localhost_8080", false), Arguments.of("http: Arguments.of("http: Arguments.of(":8080", false), Arguments.of("http: Arguments.of("sub.example.com:8080", true), Arguments.of("https: Arguments.of("https: ); } private static URI getUri(String endpointFormat, String namespace, String domainName) { try { return new URI(String.format(Locale.US, endpointFormat, namespace, domainName)); } catch (URISyntaxException exception) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid namespace name: %s", namespace), exception); } } }
This link name doesn't have to be declared. You only use the corresponding value within the if statement for each of them.
/**
 * Gets (or lazily creates) the AMQP send link for this sender's entity, and records the first
 * resolved link's name into the {@code linkName} field for later management operations.
 *
 * <p>Fix: the previous version declared a mutable local ({@code String linkName = entityName;})
 * that was reassigned in one branch and shadowed the intent of the {@code linkName} field; each
 * branch only needs its own value, so the name is now passed inline.</p>
 *
 * @return A {@link Mono} that emits the send link to use.
 */
private Mono<AmqpSendLink> getSendLink() {
    return connectionProcessor
        .flatMap(connection -> {
            if (!CoreUtils.isNullOrEmpty(viaEntityName)) {
                // "Via" send: the link targets the intermediate entity and the final destination
                // is supplied as the transfer entity path.
                return connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName, retryOptions,
                    entityName);
            } else {
                return connection.createSendLink(entityName, entityName, retryOptions, null);
            }
        })
        // compareAndSet(null, ...) records only the first link name ever observed.
        .doOnNext(next -> linkName.compareAndSet(null, next.getLinkName()));
}
String linkName = entityName;
/**
 * Resolves the AMQP send link used by this sender. A "via" sender links to the intermediate
 * entity (prefixed {@code "VIA-"}) with the destination as the transfer entity path; otherwise a
 * plain link to the entity is created. The first resolved link's name is recorded exactly once
 * in the {@code linkName} field.
 *
 * @return A {@link Mono} emitting the send link.
 */
private Mono<AmqpSendLink> getSendLink() {
    return connectionProcessor
        .flatMap(connection -> CoreUtils.isNullOrEmpty(viaEntityName)
            ? connection.createSendLink(entityName, entityName, retryOptions, null)
            : connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName, retryOptions, entityName))
        .doOnNext(next -> linkName.compareAndSet(null, next.getLinkName()));
}
/**
 * An asynchronous client for sending messages (singly, in batches, or scheduled) to an Azure
 * Service Bus queue or topic, with optional AMQP transaction support and distributed tracing.
 */
class ServiceBusSenderAsyncClient implements AutoCloseable {
    /**
     * The default maximum allowable size, in bytes, for a batch to be sent.
     */
    static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024;
    // Well-known AMQP link name for the transaction coordinator session.
    private static final String TRANSACTION_LINK_NAME = "coordinator";
    private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions();

    private final ClientLogger logger = new ClientLogger(ServiceBusSenderAsyncClient.class);
    // Name of the active send link; set once on first use and handed to management operations.
    private final AtomicReference<String> linkName = new AtomicReference<>();
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    private final AmqpRetryOptions retryOptions;
    private final AmqpRetryPolicy retryPolicy;
    private final MessagingEntityType entityType;
    private final Runnable onClientClose;
    private final String entityName;
    private final ServiceBusConnectionProcessor connectionProcessor;
    // When non-empty, messages are sent "via" this intermediate entity.
    private final String viaEntityName;

    /**
     * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity.
     */
    ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType,
        ServiceBusConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
        String viaEntityName) {
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
        this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null.");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.tracerProvider = tracerProvider;
        this.retryPolicy = getRetryPolicy(retryOptions);
        this.entityType = entityType;
        this.viaEntityName = viaEntityName;
        this.onClientClose = onClientClose;
    }

    /**
     * Gets the fully qualified namespace.
     *
     * @return The fully qualified namespace.
     */
    public String getFullyQualifiedNamespace() {
        return connectionProcessor.getFullyQualifiedNamespace();
    }

    /**
     * Gets the name of the Service Bus resource.
     *
     * @return The name of the Service Bus resource.
     */
    public String getEntityPath() {
        return entityName;
    }

    /**
     * Sends a message to a Service Bus queue or topic.
     *
     * @param message Message to be sent to Service Bus queue or topic.
     *
     * @return The {@link Mono} the finishes this operation on service bus resource.
     *
     * @throws NullPointerException if {@code message} is {@code null}.
     */
    public Mono<Void> send(ServiceBusMessage message) {
        if (Objects.isNull(message)) {
            return monoError(logger, new NullPointerException("'message' cannot be null."));
        }
        return sendInternal(Flux.just(message), null);
    }

    /**
     * Sends a message to a Service Bus queue or topic.
     *
     * @param message Message to be sent to Service Bus queue or topic.
     * @param transactionContext to be set on batch message before sending to Service Bus.
     *
     * @return The {@link Mono} the finishes this operation on service bus resource.
     *
     * @throws NullPointerException if {@code message} is {@code null}.
     */
    public Mono<Void> send(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) {
        if (Objects.isNull(transactionContext)) {
            return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        return sendInternal(Flux.just(message), transactionContext);
    }

    /**
     * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages
     * exceed the maximum size of a single batch, an exception will be triggered and the send will fail.
     * By default, the message size is the max amount allowed on the link.
     *
     * @param messages Messages to be sent to Service Bus queue or topic.
     * @param transactionContext to be set on batch message before sending to Service Bus.
     *
     * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
     *
     * @throws NullPointerException if {@code batch}, {@code transactionContext} or
     *     {@code transactionContext.transactionID} is {@code null}.
     * @throws AmqpException if {@code messages} is larger than the maximum allowed size of a single batch.
     */
    public Mono<Void> send(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) {
        if (Objects.isNull(transactionContext)) {
            return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        return sendIterable(messages, transactionContext);
    }

    /**
     * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages
     * exceed the maximum size of a single batch, an exception will be triggered and the send will fail. By default,
     * the message size is the max amount allowed on the link.
     *
     * @param messages Messages to be sent to Service Bus queue or topic.
     *
     * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
     *
     * @throws NullPointerException if {@code messages} is {@code null}.
     * @throws AmqpException if {@code messages} is larger than the maximum allowed size of a single batch.
     */
    public Mono<Void> send(Iterable<ServiceBusMessage> messages) {
        return sendIterable(messages, null);
    }

    /**
     * Sends a message batch to the Azure Service Bus entity this sender is connected to.
     *
     * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
     *
     * @return A {@link Mono} the finishes this operation on service bus resource.
     *
     * @throws NullPointerException if {@code batch} is {@code null}.
     */
    public Mono<Void> send(ServiceBusMessageBatch batch) {
        return sendInternal(batch, null);
    }

    /**
     * Sends a message batch to the Azure Service Bus entity this sender is connected to.
     *
     * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
     * @param transactionContext to be set on batch message before sending to Service Bus.
     *
     * @return A {@link Mono} the finishes this operation on service bus resource.
     *
     * @throws NullPointerException if {@code batch}, {@code transactionContext} or
     *     {@code transactionContext.transactionID} is {@code null}.
     */
    public Mono<Void> send(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
        if (Objects.isNull(transactionContext)) {
            return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        return sendInternal(batch, transactionContext);
    }

    /**
     * Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
     *
     * @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
     */
    public Mono<ServiceBusMessageBatch> createBatch() {
        return createBatch(DEFAULT_BATCH_OPTIONS);
    }

    /**
     * Creates an {@link ServiceBusMessageBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link ServiceBusMessageBatch}.
     *
     * @return A new {@link ServiceBusMessageBatch} configured with the given options.
     * @throws NullPointerException if {@code options} is null.
     */
    public Mono<ServiceBusMessageBatch> createBatch(CreateBatchOptions options) {
        if (Objects.isNull(options)) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        final int maxSize = options.getMaximumSizeInBytes();
        return getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> {
            // A non-positive link size means the link did not report one; fall back to the default.
            final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
            if (maxSize > maximumLinkSize) {
                return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                    "CreateBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size"
                        + " (%s bytes).", maxSize, maximumLinkSize)));
            }
            final int batchSize = maxSize > 0 ? maxSize : maximumLinkSize;
            return Mono.just(
                new ServiceBusMessageBatch(batchSize, link::getErrorContext, tracerProvider, messageSerializer));
        }));
    }

    /**
     * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is
     * enqueued and made available to receivers only at the scheduled enqueue time.
     *
     * @param message Message to be sent to the Service Bus Queue.
     * @param scheduledEnqueueTime Instant at which the message should appear in the Service Bus queue or topic.
     * @param transactionContext to be set on message before sending to Service Bus.
     *
     * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message.
     *
     * @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or
     *     {@code transactionContext.transactionID} is {@code null}.
     */
    public Mono<Long> scheduleMessage(ServiceBusMessage message, Instant scheduledEnqueueTime,
        ServiceBusTransactionContext transactionContext) {
        if (Objects.isNull(transactionContext)) {
            return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext);
    }

    /**
     * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is
     * enqueued and made available to receivers only at the scheduled enqueue time.
     *
     * @param message Message to be sent to the Service Bus Queue.
     * @param scheduledEnqueueTime Instant at which the message should appear in the Service Bus queue or topic.
     *
     * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message.
     *
     * @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}.
     */
    public Mono<Long> scheduleMessage(ServiceBusMessage message, Instant scheduledEnqueueTime) {
        return scheduleMessageInternal(message, scheduledEnqueueTime, null);
    }

    /**
     * Cancels the enqueuing of an already scheduled message, if it was not already enqueued.
     *
     * @param sequenceNumber of the scheduled message to cancel.
     *
     * @return The {@link Mono} that finishes this operation on service bus resource.
     *
     * @throws IllegalArgumentException if {@code sequenceNumber} is negative.
     */
    public Mono<Void> cancelScheduledMessage(long sequenceNumber) {
        if (sequenceNumber < 0) {
            return monoError(logger, new IllegalArgumentException("'sequenceNumber' cannot be negative."));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityName, entityType))
            .flatMap(managementNode -> managementNode.cancelScheduledMessage(sequenceNumber, linkName.get()));
    }

    /**
     * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed along with
     * {@link ServiceBusReceivedMessage} or {@link MessageLockToken} to all operations that needs to be in
     * this transaction.
     *
     * @return a new {@link ServiceBusTransactionContext}.
     */
    public Mono<ServiceBusTransactionContext> createTransaction() {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
        }
        return connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.createTransaction())
            .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()));
    }

    /**
     * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
     *
     * @param transactionContext to be committed.
     *
     * @return The {@link Mono} that finishes this operation on service bus resource.
     */
    public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
        }
        return connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
                transactionContext.getTransactionId())));
    }

    /**
     * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
     *
     * @param transactionContext to be rollbacked.
     *
     * @return The {@link Mono} that finishes this operation on service bus resource.
     */
    public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
        }
        return connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
                transactionContext.getTransactionId())));
    }

    /**
     * Disposes of the {@link ServiceBusSenderAsyncClient}. If the client had a dedicated connection, the underlying
     * connection is also closed.
     */
    @Override
    public void close() {
        // getAndSet makes close() idempotent: only the first caller runs the close callback.
        if (isDisposed.getAndSet(true)) {
            return;
        }
        onClientClose.run();
    }

    // Packs an iterable of messages into a single batch and sends it.
    // NOTE(review): the boolean result of tryAdd is ignored here, so any message that does not fit
    // in the batch is silently dropped rather than failing the send — confirm whether that is the
    // intended contract.
    private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) {
        if (Objects.isNull(messages)) {
            return monoError(logger, new NullPointerException("'messages' cannot be null."));
        }
        return createBatch().flatMap(messageBatch -> {
            messages.forEach(message -> messageBatch.tryAdd(message));
            return sendInternal(messageBatch, transaction);
        });
    }

    // Validates arguments, resolves the link (to learn the max message size and link name), then
    // schedules the message through the management node.
    private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, Instant scheduledEnqueueTime,
        ServiceBusTransactionContext transactionContext) {
        if (Objects.isNull(message)) {
            return monoError(logger, new NullPointerException("'message' cannot be null."));
        }
        if (Objects.isNull(scheduledEnqueueTime)) {
            return monoError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
        }
        return getSendLink()
            .flatMap(link -> link.getLinkSize().flatMap(size -> {
                int maxSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                return connectionProcessor
                    .flatMap(connection -> connection.getManagementNode(entityName, entityType))
                    .flatMap(managementNode -> managementNode.schedule(message, scheduledEnqueueTime, maxSize,
                        link.getLinkName(), transactionContext));
            }));
    }

    /**
     * Sends a message batch to the Azure Service Bus entity this sender is connected to.
     * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
     * @param transactionContext to be set on batch message before sending to Service Bus.
     *
     * @return A {@link Mono} the finishes this operation on service bus resource.
     */
    private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
        if (Objects.isNull(batch)) {
            return monoError(logger, new NullPointerException("'batch' cannot be null."));
        }
        final boolean isTracingEnabled = tracerProvider.isEnabled();
        // parentContext is only allocated (and only dereferenced) when tracing is enabled.
        final AtomicReference<Context> parentContext = isTracingEnabled
            ? new AtomicReference<>(Context.NONE)
            : null;
        if (batch.getMessages().isEmpty()) {
            logger.info("Cannot send an EventBatch that is empty.");
            return Mono.empty();
        }
        logger.info("Sending batch with size[{}].", batch.getCount());
        Context sharedContext = null;
        final List<org.apache.qpid.proton.message.Message> messages = new ArrayList<>();
        for (int i = 0; i < batch.getMessages().size(); i++) {
            final ServiceBusMessage event = batch.getMessages().get(i);
            if (isTracingEnabled) {
                parentContext.set(event.getContext());
                // The first message's context seeds the shared span; every message is linked to it.
                if (i == 0) {
                    sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
                }
                tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
            }
            final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(event);
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();
            message.setMessageAnnotations(messageAnnotations);
            messages.add(message);
        }
        final Context finalSharedContext = sharedContext != null ? sharedContext : Context.NONE;
        // withRetry applies the configured try-timeout and retry policy around the whole send.
        return withRetry(
            getSendLink().flatMap(link -> {
                if (isTracingEnabled) {
                    Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath());
                    parentContext.set(tracerProvider.startSpan(
                        entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND));
                }
                if (transactionContext != null && transactionContext.getTransactionId() != null) {
                    // Transactional send: attach the transaction id as the delivery state.
                    final TransactionalState deliveryState = new TransactionalState();
                    deliveryState.setTxnId(new Binary(transactionContext.getTransactionId().array()));
                    return messages.size() == 1
                        ? link.send(messages.get(0), deliveryState)
                        : link.send(messages, deliveryState);
                } else {
                    return messages.size() == 1
                        ? link.send(messages.get(0))
                        : link.send(messages);
                }
            })
            .doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            })
            .doOnError(error -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), Signal.error(error));
                }
            }), retryOptions.getTryTimeout(), retryPolicy);
    }

    // Collects the message stream into size-bounded batches (at most one batch — see the collector's
    // maxNumberOfBatches argument of 1) and sends them.
    private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages,
        ServiceBusTransactionContext transactionContext) {
        return getSendLink()
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateBatchOptions batchOptions = new CreateBatchOptions()
                        .setMaximumSizeInBytes(batchSize);
                    return messages.collect(new AmqpMessageCollector(batchOptions, 1,
                        link::getErrorContext, tracerProvider, messageSerializer));
                })
                .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext)));
    }

    // Sends each batch in sequence; logs (and propagates) the first error.
    private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches,
        ServiceBusTransactionContext transactionContext) {
        return eventBatches
            .flatMap(messageBatch -> sendInternal(messageBatch, transactionContext))
            .then()
            .doOnError(error -> logger.error("Error sending batch.", error));
    }

    /**
     * A {@link Collector} that accumulates {@link ServiceBusMessage}s into size-bounded
     * {@link ServiceBusMessageBatch}es, starting a new batch whenever the current one is full, and
     * failing with an {@link AmqpException} once the configured batch-count limit is exceeded.
     */
    private static class AmqpMessageCollector
        implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> {
        private final int maxMessageSize;
        // When non-null, the maximum number of completed batches allowed before failing.
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final TracerProvider tracerProvider;
        private final MessageSerializer serializer;
        // The batch currently being filled; nulled out by finisher().
        private volatile ServiceBusMessageBatch currentBatch;

        AmqpMessageCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.contextProvider = contextProvider;
            this.tracerProvider = tracerProvider;
            this.serializer = serializer;
            currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer);
        }

        @Override
        public Supplier<List<ServiceBusMessageBatch>> supplier() {
            return ArrayList::new;
        }

        @Override
        public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() {
            return (list, event) -> {
                ServiceBusMessageBatch batch = currentBatch;
                if (batch.tryAdd(event)) {
                    return;
                }
                // Current batch is full: enforce the batch-count limit before rolling over.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US,
                        "EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches);
                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }
                currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider,
                    serializer);
                currentBatch.tryAdd(event);
                list.add(batch);
            };
        }

        @Override
        public BinaryOperator<List<ServiceBusMessageBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }

        @Override
        public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() {
            return list -> {
                // Flush the in-progress batch into the result.
                ServiceBusMessageBatch batch = currentBatch;
                currentBatch = null;
                if (batch != null) {
                    list.add(batch);
                }
                return list;
            };
        }

        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
}
class ServiceBusSenderAsyncClient implements AutoCloseable { /** * The default maximum allowable size, in bytes, for a batch to be sent. */ static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final String TRANSACTION_LINK_NAME = "coordinator"; private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(ServiceBusSenderAsyncClient.class); private final AtomicReference<String> linkName = new AtomicReference<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final MessagingEntityType entityType; private final Runnable onClientClose; private final String entityName; private final ServiceBusConnectionProcessor connectionProcessor; private final String viaEntityName; /** * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity. 
*/ ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, String viaEntityName) { this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = tracerProvider; this.retryPolicy = getRetryPolicy(retryOptions); this.entityType = entityType; this.viaEntityName = viaEntityName; this.onClientClose = onClientClose; } /** * Gets the fully qualified namespace. * * @return The fully qualified namespace. */ public String getFullyQualifiedNamespace() { return connectionProcessor.getFullyQualifiedNamespace(); } /** * Gets the name of the Service Bus resource. * * @return The name of the Service Bus resource. */ public String getEntityPath() { return entityName; } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * * @return The {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code message} is {@code null}. */ public Mono<Void> sendMessage(ServiceBusMessage message) { if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } return sendInternal(Flux.just(message), null); } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. 
* * @return The {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code message} is {@code null}. */ public Mono<Void> sendMessage(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(Flux.just(message), transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages * exceed the maximum size of a single batch, an exception will be triggered and the send will fail. * By default, the message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. * @throws AmqpException if {@code messages} is larger than the maximum allowed size of a single batch. */ public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendIterable(messages, transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. 
If the size of messages exceed * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the * message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code messages} is {@code null}. * @throws AmqpException if {@code messages} is larger than the maximum allowed size of a single batch. */ public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages) { return sendIterable(messages, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch} is {@code null}. */ public Mono<Void> sendMessages(ServiceBusMessageBatch batch) { return sendInternal(batch, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. 
*/ public Mono<Void> sendMessages(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(batch, transactionContext); } /** * Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. * * @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. */ public Mono<ServiceBusMessageBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link ServiceBusMessageBatch} configured with the options specified. * * @param options A set of options used to configure the {@link ServiceBusMessageBatch}. * * @return A new {@link ServiceBusMessageBatch} configured with the given options. * @throws NullPointerException if {@code options} is null. */ public Mono<ServiceBusMessageBatch> createBatch(CreateBatchOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final int maxSize = options.getMaximumSizeInBytes(); return getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (maxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size" + " (%s bytes).", maxSize, maximumLinkSize))); } final int batchSize = maxSize > 0 ? 
maxSize : maximumLinkSize; return Mono.just( new ServiceBusMessageBatch(batchSize, link::getErrorContext, tracerProvider, messageSerializer)); })); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. * @param scheduledEnqueueTime Instant at which the message should appear in the Service Bus queue or topic. * @param transactionContext to be set on message before sending to Service Bus. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. * * @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, Instant scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. * @param scheduledEnqueueTime Instant at which the message should appear in the Service Bus queue or topic. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. 
* * @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, Instant scheduledEnqueueTime) { return scheduleMessageInternal(message, scheduledEnqueueTime, null); } /** * Cancels the enqueuing of an already scheduled message, if it was not already enqueued. * * @param sequenceNumber of the scheduled message to cancel. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws IllegalArgumentException if {@code sequenceNumber} is negative. */ public Mono<Void> cancelScheduledMessage(long sequenceNumber) { if (sequenceNumber < 0) { return monoError(logger, new IllegalArgumentException("'sequenceNumber' cannot be negative.")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.cancelScheduledMessage(sequenceNumber, linkName.get())); } /** * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed along with * {@link ServiceBusReceivedMessage} or {@code lockToken } to all operations that needs to be in * this transaction. * * @return a new {@link ServiceBusTransactionContext}. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on service bus resource. 
*/ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext to be rollbacked. * * @return The {@link Mono} that finishes this operation on service bus resource. */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the {@link ServiceBusSenderAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } onClientClose.run(); } private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) { if (Objects.isNull(messages)) { return monoError(logger, new NullPointerException("'messages' cannot be null.")); } return createBatch().flatMap(messageBatch -> { messages.forEach(message -> messageBatch.tryAdd(message)); return sendInternal(messageBatch, transaction); }); } private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, Instant scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } if (Objects.isNull(scheduledEnqueueTime)) { return monoError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null.")); } return getSendLink() .flatMap(link -> link.getLinkSize().flatMap(size -> { int maxSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.schedule(message, scheduledEnqueueTime, maxSize, link.getLinkName(), transactionContext)); })); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. */ private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(batch)) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; if (batch.getMessages().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with size[{}].", batch.getCount()); Context sharedContext = null; final List<org.apache.qpid.proton.message.Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getMessages().size(); i++) { final ServiceBusMessage event = batch.getMessages().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(event); final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); message.setMessageAnnotations(messageAnnotations); messages.add(message); } final Context finalSharedContext = sharedContext != null ? sharedContext : Context.NONE; return withRetry( getSendLink().flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } if (transactionContext != null && transactionContext.getTransactionId() != null) { final TransactionalState deliveryState = new TransactionalState(); deliveryState.setTxnId(new Binary(transactionContext.getTransactionId().array())); return messages.size() == 1 ? link.send(messages.get(0), deliveryState) : link.send(messages, deliveryState); } else { return messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages); } }) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }) .doOnError(error -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), Signal.error(error)); } }), retryOptions.getTryTimeout(), retryPolicy); } private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { return getSendLink() .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setMaximumSizeInBytes(batchSize); return messages.collect(new AmqpMessageCollector(batchOptions, 1, link::getErrorContext, tracerProvider, messageSerializer)); }) .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext))); } private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches, ServiceBusTransactionContext transactionContext) { return eventBatches .flatMap(messageBatch -> sendInternal(messageBatch, transactionContext)) .then() .doOnError(error -> logger.error("Error sending batch.", error)); } private static class AmqpMessageCollector implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> { private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final MessageSerializer serializer; private volatile ServiceBusMessageBatch currentBatch; AmqpMessageCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.serializer = serializer; currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer); } @Override public Supplier<List<ServiceBusMessageBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() { return (list, event) -> { ServiceBusMessageBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<ServiceBusMessageBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() { return list -> { ServiceBusMessageBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Does the JVM check both environment and system properties when allowing these headers to be used? If not we should use whichever it uses directly as `Configuration` will lookup against both.
private Set<String> getAllowRestrictedHeaders() { Properties properties = getNetworkProperties(); String[] allowRestrictedHeadersNetProperties = properties.getProperty(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "").split(","); Configuration config = (this.configuration == null) ? Configuration.getGlobalConfiguration() : configuration; String[] allowRestrictedHeadersSystemProperties = config.get(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "") .split(","); Set<String> allowRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersSystemProperties) .map(String::trim) .collect(Collectors.toSet())); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersNetProperties) .map(String::trim) .collect(Collectors.toSet())); return allowRestrictedHeaders; }
Configuration config = (this.configuration == null)
private Set<String> getAllowRestrictedHeaders() { Properties properties = getNetworkProperties(); String[] allowRestrictedHeadersNetProperties = properties.getProperty(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "").split(","); Configuration config = (this.configuration == null) ? Configuration.getGlobalConfiguration() : configuration; String[] allowRestrictedHeadersSystemProperties = config.get(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "") .split(","); Set<String> allowRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersSystemProperties) .map(String::trim) .collect(Collectors.toSet())); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersNetProperties) .map(String::trim) .collect(Collectors.toSet())); return allowRestrictedHeaders; }
class JdkAsyncHttpClientBuilder { private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private static final String JAVA_HOME = System.getProperty("java.home"); private static final String JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS = "jdk.httpclient.allowRestrictedHeaders"; static final Set<String> DEFAULT_RESTRICTED_HEADERS; static { TreeSet<String> treeSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); treeSet.addAll(Set.of( "connection", "content-length", "expect", "host", "upgrade" )); DEFAULT_RESTRICTED_HEADERS = Collections.unmodifiableSet(treeSet); } private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClientBuilder.class); private java.net.http.HttpClient.Builder httpClientBuilder; private Duration connectionTimeout; private ProxyOptions proxyOptions; private Configuration configuration; private Executor executor; /** * Creates JdkAsyncHttpClientBuilder. */ public JdkAsyncHttpClientBuilder() { } /** * Creates JdkAsyncHttpClientBuilder from the builder of an existing {@link java.net.http.HttpClient.Builder}. * * @param httpClientBuilder the HttpClient builder to use * @throws NullPointerException if {@code httpClientBuilder} is null */ public JdkAsyncHttpClientBuilder(java.net.http.HttpClient.Builder httpClientBuilder) { this.httpClientBuilder = Objects.requireNonNull(httpClientBuilder, "'httpClientBuilder' cannot be null."); } /** * Sets the executor to be used for asynchronous and dependent tasks. This cannot be null. * * <p> If this method is not invoked prior to {@linkplain * newly built {@code HttpClient}. * * @param executor the executor to be used for asynchronous and dependent tasks * @return the updated JdkAsyncHttpClientBuilder object * @throws NullPointerException if {@code executor} is null */ public JdkAsyncHttpClientBuilder executor(Executor executor) { this.executor = Objects.requireNonNull(executor, "executor can not be null"); return this; } /** * Sets the connection timeout. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.connectionTimeout * * The default connection timeout is 60 seconds. * * @param connectionTimeout the connection timeout * @return the updated JdkAsyncHttpClientBuilder object */ public JdkAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.proxy * * @param proxyOptions The proxy configuration to use. * @return the updated {@link JdkAsyncHttpClientBuilder} object */ public JdkAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the configuration store that is used during construction of the HTTP client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated JdkAsyncHttpClientBuilder object. */ public JdkAsyncHttpClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { java.net.http.HttpClient.Builder httpClientBuilder = this.httpClientBuilder == null ? java.net.http.HttpClient.newBuilder() : this.httpClientBuilder; httpClientBuilder = (this.connectionTimeout != null) ? httpClientBuilder.connectTimeout(this.connectionTimeout) : httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; ProxyOptions buildProxyOptions = (proxyOptions == null && buildConfiguration != Configuration.NONE) ? 
ProxyOptions.fromConfiguration(buildConfiguration) : proxyOptions; if (executor != null) { httpClientBuilder.executor(executor); } if (buildProxyOptions != null) { httpClientBuilder = httpClientBuilder.proxy(new JdkHttpClientProxySelector( buildProxyOptions.getType().toProxyType(), buildProxyOptions.getAddress(), buildProxyOptions.getNonProxyHosts())); if (buildProxyOptions.getUsername() != null) { httpClientBuilder .authenticator(new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword())); } } return new JdkAsyncHttpClient(httpClientBuilder.build(), Collections.unmodifiableSet(getRestrictedHeaders())); } Set<String> getRestrictedHeaders() { Set<String> allowRestrictedHeaders = getAllowRestrictedHeaders(); Set<String> restrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); restrictedHeaders.addAll(DEFAULT_RESTRICTED_HEADERS); restrictedHeaders.removeAll(allowRestrictedHeaders); return restrictedHeaders; } Properties getNetworkProperties() { Path path = Paths.get(JAVA_HOME, "conf", "net.properties"); Properties properties = new Properties(); try (Reader reader = Files.newBufferedReader(path)) { properties.load(reader); } catch (IOException e) { logger.warning("Cannot read net properties file at path {}", path, e); } return properties; } private static class ProxyAuthenticator extends Authenticator { private final String userName; private final String password; ProxyAuthenticator(String userName, String password) { this.userName = userName; this.password = password; } @Override protected PasswordAuthentication getPasswordAuthentication() { return new PasswordAuthentication(this.userName, password.toCharArray()); } } }
class JdkAsyncHttpClientBuilder { private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private static final String JAVA_HOME = System.getProperty("java.home"); private static final String JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS = "jdk.httpclient.allowRestrictedHeaders"; static final Set<String> DEFAULT_RESTRICTED_HEADERS; static { TreeSet<String> treeSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); treeSet.addAll(Set.of( "connection", "content-length", "expect", "host", "upgrade" )); DEFAULT_RESTRICTED_HEADERS = Collections.unmodifiableSet(treeSet); } private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClientBuilder.class); private java.net.http.HttpClient.Builder httpClientBuilder; private Duration connectionTimeout; private ProxyOptions proxyOptions; private Configuration configuration; private Executor executor; /** * Creates JdkAsyncHttpClientBuilder. */ public JdkAsyncHttpClientBuilder() { } /** * Creates JdkAsyncHttpClientBuilder from the builder of an existing {@link java.net.http.HttpClient.Builder}. * * @param httpClientBuilder the HttpClient builder to use * @throws NullPointerException if {@code httpClientBuilder} is null */ public JdkAsyncHttpClientBuilder(java.net.http.HttpClient.Builder httpClientBuilder) { this.httpClientBuilder = Objects.requireNonNull(httpClientBuilder, "'httpClientBuilder' cannot be null."); } /** * Sets the executor to be used for asynchronous and dependent tasks. This cannot be null. * * <p> If this method is not invoked prior to {@linkplain * newly built {@code HttpClient}. * * @param executor the executor to be used for asynchronous and dependent tasks * @return the updated JdkAsyncHttpClientBuilder object * @throws NullPointerException if {@code executor} is null */ public JdkAsyncHttpClientBuilder executor(Executor executor) { this.executor = Objects.requireNonNull(executor, "executor can not be null"); return this; } /** * Sets the connection timeout. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.connectionTimeout * * The default connection timeout is 60 seconds. * * @param connectionTimeout the connection timeout * @return the updated JdkAsyncHttpClientBuilder object */ public JdkAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.proxy * * @param proxyOptions The proxy configuration to use. * @return the updated {@link JdkAsyncHttpClientBuilder} object */ public JdkAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the configuration store that is used during construction of the HTTP client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated JdkAsyncHttpClientBuilder object. */ public JdkAsyncHttpClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { java.net.http.HttpClient.Builder httpClientBuilder = this.httpClientBuilder == null ? java.net.http.HttpClient.newBuilder() : this.httpClientBuilder; httpClientBuilder = (this.connectionTimeout != null) ? httpClientBuilder.connectTimeout(this.connectionTimeout) : httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; ProxyOptions buildProxyOptions = (proxyOptions == null && buildConfiguration != Configuration.NONE) ? 
ProxyOptions.fromConfiguration(buildConfiguration) : proxyOptions; if (executor != null) { httpClientBuilder.executor(executor); } if (buildProxyOptions != null) { httpClientBuilder = httpClientBuilder.proxy(new JdkHttpClientProxySelector( buildProxyOptions.getType().toProxyType(), buildProxyOptions.getAddress(), buildProxyOptions.getNonProxyHosts())); if (buildProxyOptions.getUsername() != null) { httpClientBuilder .authenticator(new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword())); } } return new JdkAsyncHttpClient(httpClientBuilder.build(), Collections.unmodifiableSet(getRestrictedHeaders())); } Set<String> getRestrictedHeaders() { Set<String> allowRestrictedHeaders = getAllowRestrictedHeaders(); Set<String> restrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); restrictedHeaders.addAll(DEFAULT_RESTRICTED_HEADERS); restrictedHeaders.removeAll(allowRestrictedHeaders); return restrictedHeaders; } Properties getNetworkProperties() { Path path = Paths.get(JAVA_HOME, "conf", "net.properties"); Properties properties = new Properties(); try (Reader reader = Files.newBufferedReader(path)) { properties.load(reader); } catch (IOException e) { logger.warning("Cannot read net properties file at path {}", path, e); } return properties; } private static class ProxyAuthenticator extends Authenticator { private final String userName; private final String password; ProxyAuthenticator(String userName, String password) { this.userName = userName; this.password = password; } @Override protected PasswordAuthentication getPasswordAuthentication() { return new PasswordAuthentication(this.userName, password.toCharArray()); } } }
What's the purpose of having Case_Insensitive_Order?
Set<String> getRestrictedHeaders() { Set<String> allowRestrictedHeaders = getAllowRestrictedHeaders(); Set<String> restrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); restrictedHeaders.addAll(DEFAULT_RESTRICTED_HEADERS); restrictedHeaders.removeAll(allowRestrictedHeaders); return restrictedHeaders; }
Set<String> restrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
Set<String> getRestrictedHeaders() { Set<String> allowRestrictedHeaders = getAllowRestrictedHeaders(); Set<String> restrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); restrictedHeaders.addAll(DEFAULT_RESTRICTED_HEADERS); restrictedHeaders.removeAll(allowRestrictedHeaders); return restrictedHeaders; }
class JdkAsyncHttpClientBuilder { private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private static final String JAVA_HOME = System.getProperty("java.home"); private static final String JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS = "jdk.httpclient.allowRestrictedHeaders"; static final Set<String> DEFAULT_RESTRICTED_HEADERS; static { TreeSet<String> treeSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); treeSet.addAll(Set.of( "connection", "content-length", "expect", "host", "upgrade" )); DEFAULT_RESTRICTED_HEADERS = Collections.unmodifiableSet(treeSet); } private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClientBuilder.class); private java.net.http.HttpClient.Builder httpClientBuilder; private Duration connectionTimeout; private ProxyOptions proxyOptions; private Configuration configuration; private Executor executor; /** * Creates JdkAsyncHttpClientBuilder. */ public JdkAsyncHttpClientBuilder() { } /** * Creates JdkAsyncHttpClientBuilder from the builder of an existing {@link java.net.http.HttpClient.Builder}. * * @param httpClientBuilder the HttpClient builder to use * @throws NullPointerException if {@code httpClientBuilder} is null */ public JdkAsyncHttpClientBuilder(java.net.http.HttpClient.Builder httpClientBuilder) { this.httpClientBuilder = Objects.requireNonNull(httpClientBuilder, "'httpClientBuilder' cannot be null."); } /** * Sets the executor to be used for asynchronous and dependent tasks. This cannot be null. * * <p> If this method is not invoked prior to {@linkplain * newly built {@code HttpClient}. * * @param executor the executor to be used for asynchronous and dependent tasks * @return the updated JdkAsyncHttpClientBuilder object * @throws NullPointerException if {@code executor} is null */ public JdkAsyncHttpClientBuilder executor(Executor executor) { this.executor = Objects.requireNonNull(executor, "executor can not be null"); return this; } /** * Sets the connection timeout. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.connectionTimeout * * The default connection timeout is 60 seconds. * * @param connectionTimeout the connection timeout * @return the updated JdkAsyncHttpClientBuilder object */ public JdkAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.proxy * * @param proxyOptions The proxy configuration to use. * @return the updated {@link JdkAsyncHttpClientBuilder} object */ public JdkAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the configuration store that is used during construction of the HTTP client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated JdkAsyncHttpClientBuilder object. */ public JdkAsyncHttpClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { java.net.http.HttpClient.Builder httpClientBuilder = this.httpClientBuilder == null ? java.net.http.HttpClient.newBuilder() : this.httpClientBuilder; httpClientBuilder = (this.connectionTimeout != null) ? httpClientBuilder.connectTimeout(this.connectionTimeout) : httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; ProxyOptions buildProxyOptions = (proxyOptions == null && buildConfiguration != Configuration.NONE) ? 
ProxyOptions.fromConfiguration(buildConfiguration) : proxyOptions; if (executor != null) { httpClientBuilder.executor(executor); } if (buildProxyOptions != null) { httpClientBuilder = httpClientBuilder.proxy(new JdkHttpClientProxySelector( buildProxyOptions.getType().toProxyType(), buildProxyOptions.getAddress(), buildProxyOptions.getNonProxyHosts())); if (buildProxyOptions.getUsername() != null) { httpClientBuilder .authenticator(new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword())); } } return new JdkAsyncHttpClient(httpClientBuilder.build(), Collections.unmodifiableSet(getRestrictedHeaders())); } private Set<String> getAllowRestrictedHeaders() { Properties properties = getNetworkProperties(); String[] allowRestrictedHeadersNetProperties = properties.getProperty(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "").split(","); Configuration config = (this.configuration == null) ? Configuration.getGlobalConfiguration() : configuration; String[] allowRestrictedHeadersSystemProperties = config.get(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "") .split(","); Set<String> allowRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersSystemProperties) .map(String::trim) .collect(Collectors.toSet())); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersNetProperties) .map(String::trim) .collect(Collectors.toSet())); return allowRestrictedHeaders; } Properties getNetworkProperties() { Path path = Paths.get(JAVA_HOME, "conf", "net.properties"); Properties properties = new Properties(); try (Reader reader = Files.newBufferedReader(path)) { properties.load(reader); } catch (IOException e) { logger.warning("Cannot read net properties file at path {}", path, e); } return properties; } private static class ProxyAuthenticator extends Authenticator { private final String userName; private final String password; ProxyAuthenticator(String userName, String password) { 
this.userName = userName; this.password = password; } @Override protected PasswordAuthentication getPasswordAuthentication() { return new PasswordAuthentication(this.userName, password.toCharArray()); } } }
class JdkAsyncHttpClientBuilder { private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private static final String JAVA_HOME = System.getProperty("java.home"); private static final String JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS = "jdk.httpclient.allowRestrictedHeaders"; static final Set<String> DEFAULT_RESTRICTED_HEADERS; static { TreeSet<String> treeSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); treeSet.addAll(Set.of( "connection", "content-length", "expect", "host", "upgrade" )); DEFAULT_RESTRICTED_HEADERS = Collections.unmodifiableSet(treeSet); } private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClientBuilder.class); private java.net.http.HttpClient.Builder httpClientBuilder; private Duration connectionTimeout; private ProxyOptions proxyOptions; private Configuration configuration; private Executor executor; /** * Creates JdkAsyncHttpClientBuilder. */ public JdkAsyncHttpClientBuilder() { } /** * Creates JdkAsyncHttpClientBuilder from the builder of an existing {@link java.net.http.HttpClient.Builder}. * * @param httpClientBuilder the HttpClient builder to use * @throws NullPointerException if {@code httpClientBuilder} is null */ public JdkAsyncHttpClientBuilder(java.net.http.HttpClient.Builder httpClientBuilder) { this.httpClientBuilder = Objects.requireNonNull(httpClientBuilder, "'httpClientBuilder' cannot be null."); } /** * Sets the executor to be used for asynchronous and dependent tasks. This cannot be null. * * <p> If this method is not invoked prior to {@linkplain * newly built {@code HttpClient}. * * @param executor the executor to be used for asynchronous and dependent tasks * @return the updated JdkAsyncHttpClientBuilder object * @throws NullPointerException if {@code executor} is null */ public JdkAsyncHttpClientBuilder executor(Executor executor) { this.executor = Objects.requireNonNull(executor, "executor can not be null"); return this; } /** * Sets the connection timeout. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.connectionTimeout * * The default connection timeout is 60 seconds. * * @param connectionTimeout the connection timeout * @return the updated JdkAsyncHttpClientBuilder object */ public JdkAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.proxy * * @param proxyOptions The proxy configuration to use. * @return the updated {@link JdkAsyncHttpClientBuilder} object */ public JdkAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the configuration store that is used during construction of the HTTP client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated JdkAsyncHttpClientBuilder object. */ public JdkAsyncHttpClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { java.net.http.HttpClient.Builder httpClientBuilder = this.httpClientBuilder == null ? java.net.http.HttpClient.newBuilder() : this.httpClientBuilder; httpClientBuilder = (this.connectionTimeout != null) ? httpClientBuilder.connectTimeout(this.connectionTimeout) : httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; ProxyOptions buildProxyOptions = (proxyOptions == null && buildConfiguration != Configuration.NONE) ? 
ProxyOptions.fromConfiguration(buildConfiguration) : proxyOptions; if (executor != null) { httpClientBuilder.executor(executor); } if (buildProxyOptions != null) { httpClientBuilder = httpClientBuilder.proxy(new JdkHttpClientProxySelector( buildProxyOptions.getType().toProxyType(), buildProxyOptions.getAddress(), buildProxyOptions.getNonProxyHosts())); if (buildProxyOptions.getUsername() != null) { httpClientBuilder .authenticator(new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword())); } } return new JdkAsyncHttpClient(httpClientBuilder.build(), Collections.unmodifiableSet(getRestrictedHeaders())); } private Set<String> getAllowRestrictedHeaders() { Properties properties = getNetworkProperties(); String[] allowRestrictedHeadersNetProperties = properties.getProperty(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "").split(","); Configuration config = (this.configuration == null) ? Configuration.getGlobalConfiguration() : configuration; String[] allowRestrictedHeadersSystemProperties = config.get(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "") .split(","); Set<String> allowRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersSystemProperties) .map(String::trim) .collect(Collectors.toSet())); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersNetProperties) .map(String::trim) .collect(Collectors.toSet())); return allowRestrictedHeaders; } Properties getNetworkProperties() { Path path = Paths.get(JAVA_HOME, "conf", "net.properties"); Properties properties = new Properties(); try (Reader reader = Files.newBufferedReader(path)) { properties.load(reader); } catch (IOException e) { logger.warning("Cannot read net properties file at path {}", path, e); } return properties; } private static class ProxyAuthenticator extends Authenticator { private final String userName; private final String password; ProxyAuthenticator(String userName, String password) { 
this.userName = userName; this.password = password; } @Override protected PasswordAuthentication getPasswordAuthentication() { return new PasswordAuthentication(this.userName, password.toCharArray()); } } }
Since most of our proxy and other configurations are checking both, users might want to use the same location (either env var or sys properties) for all their configurations. So, checking both as it makes it easier for the user.
private Set<String> getAllowRestrictedHeaders() { Properties properties = getNetworkProperties(); String[] allowRestrictedHeadersNetProperties = properties.getProperty(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "").split(","); Configuration config = (this.configuration == null) ? Configuration.getGlobalConfiguration() : configuration; String[] allowRestrictedHeadersSystemProperties = config.get(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "") .split(","); Set<String> allowRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersSystemProperties) .map(String::trim) .collect(Collectors.toSet())); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersNetProperties) .map(String::trim) .collect(Collectors.toSet())); return allowRestrictedHeaders; }
Configuration config = (this.configuration == null)
private Set<String> getAllowRestrictedHeaders() { Properties properties = getNetworkProperties(); String[] allowRestrictedHeadersNetProperties = properties.getProperty(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "").split(","); Configuration config = (this.configuration == null) ? Configuration.getGlobalConfiguration() : configuration; String[] allowRestrictedHeadersSystemProperties = config.get(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "") .split(","); Set<String> allowRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersSystemProperties) .map(String::trim) .collect(Collectors.toSet())); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersNetProperties) .map(String::trim) .collect(Collectors.toSet())); return allowRestrictedHeaders; }
class JdkAsyncHttpClientBuilder { private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private static final String JAVA_HOME = System.getProperty("java.home"); private static final String JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS = "jdk.httpclient.allowRestrictedHeaders"; static final Set<String> DEFAULT_RESTRICTED_HEADERS; static { TreeSet<String> treeSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); treeSet.addAll(Set.of( "connection", "content-length", "expect", "host", "upgrade" )); DEFAULT_RESTRICTED_HEADERS = Collections.unmodifiableSet(treeSet); } private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClientBuilder.class); private java.net.http.HttpClient.Builder httpClientBuilder; private Duration connectionTimeout; private ProxyOptions proxyOptions; private Configuration configuration; private Executor executor; /** * Creates JdkAsyncHttpClientBuilder. */ public JdkAsyncHttpClientBuilder() { } /** * Creates JdkAsyncHttpClientBuilder from the builder of an existing {@link java.net.http.HttpClient.Builder}. * * @param httpClientBuilder the HttpClient builder to use * @throws NullPointerException if {@code httpClientBuilder} is null */ public JdkAsyncHttpClientBuilder(java.net.http.HttpClient.Builder httpClientBuilder) { this.httpClientBuilder = Objects.requireNonNull(httpClientBuilder, "'httpClientBuilder' cannot be null."); } /** * Sets the executor to be used for asynchronous and dependent tasks. This cannot be null. * * <p> If this method is not invoked prior to {@linkplain * newly built {@code HttpClient}. * * @param executor the executor to be used for asynchronous and dependent tasks * @return the updated JdkAsyncHttpClientBuilder object * @throws NullPointerException if {@code executor} is null */ public JdkAsyncHttpClientBuilder executor(Executor executor) { this.executor = Objects.requireNonNull(executor, "executor can not be null"); return this; } /** * Sets the connection timeout. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.connectionTimeout * * The default connection timeout is 60 seconds. * * @param connectionTimeout the connection timeout * @return the updated JdkAsyncHttpClientBuilder object */ public JdkAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.proxy * * @param proxyOptions The proxy configuration to use. * @return the updated {@link JdkAsyncHttpClientBuilder} object */ public JdkAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the configuration store that is used during construction of the HTTP client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated JdkAsyncHttpClientBuilder object. */ public JdkAsyncHttpClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { java.net.http.HttpClient.Builder httpClientBuilder = this.httpClientBuilder == null ? java.net.http.HttpClient.newBuilder() : this.httpClientBuilder; httpClientBuilder = (this.connectionTimeout != null) ? httpClientBuilder.connectTimeout(this.connectionTimeout) : httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; ProxyOptions buildProxyOptions = (proxyOptions == null && buildConfiguration != Configuration.NONE) ? 
ProxyOptions.fromConfiguration(buildConfiguration) : proxyOptions; if (executor != null) { httpClientBuilder.executor(executor); } if (buildProxyOptions != null) { httpClientBuilder = httpClientBuilder.proxy(new JdkHttpClientProxySelector( buildProxyOptions.getType().toProxyType(), buildProxyOptions.getAddress(), buildProxyOptions.getNonProxyHosts())); if (buildProxyOptions.getUsername() != null) { httpClientBuilder .authenticator(new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword())); } } return new JdkAsyncHttpClient(httpClientBuilder.build(), Collections.unmodifiableSet(getRestrictedHeaders())); } Set<String> getRestrictedHeaders() { Set<String> allowRestrictedHeaders = getAllowRestrictedHeaders(); Set<String> restrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); restrictedHeaders.addAll(DEFAULT_RESTRICTED_HEADERS); restrictedHeaders.removeAll(allowRestrictedHeaders); return restrictedHeaders; } Properties getNetworkProperties() { Path path = Paths.get(JAVA_HOME, "conf", "net.properties"); Properties properties = new Properties(); try (Reader reader = Files.newBufferedReader(path)) { properties.load(reader); } catch (IOException e) { logger.warning("Cannot read net properties file at path {}", path, e); } return properties; } private static class ProxyAuthenticator extends Authenticator { private final String userName; private final String password; ProxyAuthenticator(String userName, String password) { this.userName = userName; this.password = password; } @Override protected PasswordAuthentication getPasswordAuthentication() { return new PasswordAuthentication(this.userName, password.toCharArray()); } } }
class JdkAsyncHttpClientBuilder { private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private static final String JAVA_HOME = System.getProperty("java.home"); private static final String JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS = "jdk.httpclient.allowRestrictedHeaders"; static final Set<String> DEFAULT_RESTRICTED_HEADERS; static { TreeSet<String> treeSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); treeSet.addAll(Set.of( "connection", "content-length", "expect", "host", "upgrade" )); DEFAULT_RESTRICTED_HEADERS = Collections.unmodifiableSet(treeSet); } private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClientBuilder.class); private java.net.http.HttpClient.Builder httpClientBuilder; private Duration connectionTimeout; private ProxyOptions proxyOptions; private Configuration configuration; private Executor executor; /** * Creates JdkAsyncHttpClientBuilder. */ public JdkAsyncHttpClientBuilder() { } /** * Creates JdkAsyncHttpClientBuilder from the builder of an existing {@link java.net.http.HttpClient.Builder}. * * @param httpClientBuilder the HttpClient builder to use * @throws NullPointerException if {@code httpClientBuilder} is null */ public JdkAsyncHttpClientBuilder(java.net.http.HttpClient.Builder httpClientBuilder) { this.httpClientBuilder = Objects.requireNonNull(httpClientBuilder, "'httpClientBuilder' cannot be null."); } /** * Sets the executor to be used for asynchronous and dependent tasks. This cannot be null. * * <p> If this method is not invoked prior to {@linkplain * newly built {@code HttpClient}. * * @param executor the executor to be used for asynchronous and dependent tasks * @return the updated JdkAsyncHttpClientBuilder object * @throws NullPointerException if {@code executor} is null */ public JdkAsyncHttpClientBuilder executor(Executor executor) { this.executor = Objects.requireNonNull(executor, "executor can not be null"); return this; } /** * Sets the connection timeout. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.connectionTimeout * * The default connection timeout is 60 seconds. * * @param connectionTimeout the connection timeout * @return the updated JdkAsyncHttpClientBuilder object */ public JdkAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.proxy * * @param proxyOptions The proxy configuration to use. * @return the updated {@link JdkAsyncHttpClientBuilder} object */ public JdkAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the configuration store that is used during construction of the HTTP client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated JdkAsyncHttpClientBuilder object. */ public JdkAsyncHttpClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { java.net.http.HttpClient.Builder httpClientBuilder = this.httpClientBuilder == null ? java.net.http.HttpClient.newBuilder() : this.httpClientBuilder; httpClientBuilder = (this.connectionTimeout != null) ? httpClientBuilder.connectTimeout(this.connectionTimeout) : httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; ProxyOptions buildProxyOptions = (proxyOptions == null && buildConfiguration != Configuration.NONE) ? 
ProxyOptions.fromConfiguration(buildConfiguration) : proxyOptions; if (executor != null) { httpClientBuilder.executor(executor); } if (buildProxyOptions != null) { httpClientBuilder = httpClientBuilder.proxy(new JdkHttpClientProxySelector( buildProxyOptions.getType().toProxyType(), buildProxyOptions.getAddress(), buildProxyOptions.getNonProxyHosts())); if (buildProxyOptions.getUsername() != null) { httpClientBuilder .authenticator(new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword())); } } return new JdkAsyncHttpClient(httpClientBuilder.build(), Collections.unmodifiableSet(getRestrictedHeaders())); } Set<String> getRestrictedHeaders() { Set<String> allowRestrictedHeaders = getAllowRestrictedHeaders(); Set<String> restrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); restrictedHeaders.addAll(DEFAULT_RESTRICTED_HEADERS); restrictedHeaders.removeAll(allowRestrictedHeaders); return restrictedHeaders; } Properties getNetworkProperties() { Path path = Paths.get(JAVA_HOME, "conf", "net.properties"); Properties properties = new Properties(); try (Reader reader = Files.newBufferedReader(path)) { properties.load(reader); } catch (IOException e) { logger.warning("Cannot read net properties file at path {}", path, e); } return properties; } private static class ProxyAuthenticator extends Authenticator { private final String userName; private final String password; ProxyAuthenticator(String userName, String password) { this.userName = userName; this.password = password; } @Override protected PasswordAuthentication getPasswordAuthentication() { return new PasswordAuthentication(this.userName, password.toCharArray()); } } }
Case insensitive order will allow for `set.contains()` to ignore case and evaluate if the header is restricted or not. See unit tests.
Set<String> getRestrictedHeaders() { Set<String> allowRestrictedHeaders = getAllowRestrictedHeaders(); Set<String> restrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); restrictedHeaders.addAll(DEFAULT_RESTRICTED_HEADERS); restrictedHeaders.removeAll(allowRestrictedHeaders); return restrictedHeaders; }
Set<String> restrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
Set<String> getRestrictedHeaders() { Set<String> allowRestrictedHeaders = getAllowRestrictedHeaders(); Set<String> restrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); restrictedHeaders.addAll(DEFAULT_RESTRICTED_HEADERS); restrictedHeaders.removeAll(allowRestrictedHeaders); return restrictedHeaders; }
class JdkAsyncHttpClientBuilder { private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private static final String JAVA_HOME = System.getProperty("java.home"); private static final String JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS = "jdk.httpclient.allowRestrictedHeaders"; static final Set<String> DEFAULT_RESTRICTED_HEADERS; static { TreeSet<String> treeSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); treeSet.addAll(Set.of( "connection", "content-length", "expect", "host", "upgrade" )); DEFAULT_RESTRICTED_HEADERS = Collections.unmodifiableSet(treeSet); } private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClientBuilder.class); private java.net.http.HttpClient.Builder httpClientBuilder; private Duration connectionTimeout; private ProxyOptions proxyOptions; private Configuration configuration; private Executor executor; /** * Creates JdkAsyncHttpClientBuilder. */ public JdkAsyncHttpClientBuilder() { } /** * Creates JdkAsyncHttpClientBuilder from the builder of an existing {@link java.net.http.HttpClient.Builder}. * * @param httpClientBuilder the HttpClient builder to use * @throws NullPointerException if {@code httpClientBuilder} is null */ public JdkAsyncHttpClientBuilder(java.net.http.HttpClient.Builder httpClientBuilder) { this.httpClientBuilder = Objects.requireNonNull(httpClientBuilder, "'httpClientBuilder' cannot be null."); } /** * Sets the executor to be used for asynchronous and dependent tasks. This cannot be null. * * <p> If this method is not invoked prior to {@linkplain * newly built {@code HttpClient}. * * @param executor the executor to be used for asynchronous and dependent tasks * @return the updated JdkAsyncHttpClientBuilder object * @throws NullPointerException if {@code executor} is null */ public JdkAsyncHttpClientBuilder executor(Executor executor) { this.executor = Objects.requireNonNull(executor, "executor can not be null"); return this; } /** * Sets the connection timeout. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.connectionTimeout * * The default connection timeout is 60 seconds. * * @param connectionTimeout the connection timeout * @return the updated JdkAsyncHttpClientBuilder object */ public JdkAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.proxy * * @param proxyOptions The proxy configuration to use. * @return the updated {@link JdkAsyncHttpClientBuilder} object */ public JdkAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the configuration store that is used during construction of the HTTP client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated JdkAsyncHttpClientBuilder object. */ public JdkAsyncHttpClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { java.net.http.HttpClient.Builder httpClientBuilder = this.httpClientBuilder == null ? java.net.http.HttpClient.newBuilder() : this.httpClientBuilder; httpClientBuilder = (this.connectionTimeout != null) ? httpClientBuilder.connectTimeout(this.connectionTimeout) : httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; ProxyOptions buildProxyOptions = (proxyOptions == null && buildConfiguration != Configuration.NONE) ? 
ProxyOptions.fromConfiguration(buildConfiguration) : proxyOptions; if (executor != null) { httpClientBuilder.executor(executor); } if (buildProxyOptions != null) { httpClientBuilder = httpClientBuilder.proxy(new JdkHttpClientProxySelector( buildProxyOptions.getType().toProxyType(), buildProxyOptions.getAddress(), buildProxyOptions.getNonProxyHosts())); if (buildProxyOptions.getUsername() != null) { httpClientBuilder .authenticator(new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword())); } } return new JdkAsyncHttpClient(httpClientBuilder.build(), Collections.unmodifiableSet(getRestrictedHeaders())); } private Set<String> getAllowRestrictedHeaders() { Properties properties = getNetworkProperties(); String[] allowRestrictedHeadersNetProperties = properties.getProperty(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "").split(","); Configuration config = (this.configuration == null) ? Configuration.getGlobalConfiguration() : configuration; String[] allowRestrictedHeadersSystemProperties = config.get(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "") .split(","); Set<String> allowRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersSystemProperties) .map(String::trim) .collect(Collectors.toSet())); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersNetProperties) .map(String::trim) .collect(Collectors.toSet())); return allowRestrictedHeaders; } Properties getNetworkProperties() { Path path = Paths.get(JAVA_HOME, "conf", "net.properties"); Properties properties = new Properties(); try (Reader reader = Files.newBufferedReader(path)) { properties.load(reader); } catch (IOException e) { logger.warning("Cannot read net properties file at path {}", path, e); } return properties; } private static class ProxyAuthenticator extends Authenticator { private final String userName; private final String password; ProxyAuthenticator(String userName, String password) { 
this.userName = userName; this.password = password; } @Override protected PasswordAuthentication getPasswordAuthentication() { return new PasswordAuthentication(this.userName, password.toCharArray()); } } }
class JdkAsyncHttpClientBuilder { private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private static final String JAVA_HOME = System.getProperty("java.home"); private static final String JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS = "jdk.httpclient.allowRestrictedHeaders"; static final Set<String> DEFAULT_RESTRICTED_HEADERS; static { TreeSet<String> treeSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); treeSet.addAll(Set.of( "connection", "content-length", "expect", "host", "upgrade" )); DEFAULT_RESTRICTED_HEADERS = Collections.unmodifiableSet(treeSet); } private final ClientLogger logger = new ClientLogger(JdkAsyncHttpClientBuilder.class); private java.net.http.HttpClient.Builder httpClientBuilder; private Duration connectionTimeout; private ProxyOptions proxyOptions; private Configuration configuration; private Executor executor; /** * Creates JdkAsyncHttpClientBuilder. */ public JdkAsyncHttpClientBuilder() { } /** * Creates JdkAsyncHttpClientBuilder from the builder of an existing {@link java.net.http.HttpClient.Builder}. * * @param httpClientBuilder the HttpClient builder to use * @throws NullPointerException if {@code httpClientBuilder} is null */ public JdkAsyncHttpClientBuilder(java.net.http.HttpClient.Builder httpClientBuilder) { this.httpClientBuilder = Objects.requireNonNull(httpClientBuilder, "'httpClientBuilder' cannot be null."); } /** * Sets the executor to be used for asynchronous and dependent tasks. This cannot be null. * * <p> If this method is not invoked prior to {@linkplain * newly built {@code HttpClient}. * * @param executor the executor to be used for asynchronous and dependent tasks * @return the updated JdkAsyncHttpClientBuilder object * @throws NullPointerException if {@code executor} is null */ public JdkAsyncHttpClientBuilder executor(Executor executor) { this.executor = Objects.requireNonNull(executor, "executor can not be null"); return this; } /** * Sets the connection timeout. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.connectionTimeout * * The default connection timeout is 60 seconds. * * @param connectionTimeout the connection timeout * @return the updated JdkAsyncHttpClientBuilder object */ public JdkAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.jdk.httpclient.JdkAsyncHttpClientBuilder.proxy * * @param proxyOptions The proxy configuration to use. * @return the updated {@link JdkAsyncHttpClientBuilder} object */ public JdkAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the configuration store that is used during construction of the HTTP client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated JdkAsyncHttpClientBuilder object. */ public JdkAsyncHttpClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { java.net.http.HttpClient.Builder httpClientBuilder = this.httpClientBuilder == null ? java.net.http.HttpClient.newBuilder() : this.httpClientBuilder; httpClientBuilder = (this.connectionTimeout != null) ? httpClientBuilder.connectTimeout(this.connectionTimeout) : httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; ProxyOptions buildProxyOptions = (proxyOptions == null && buildConfiguration != Configuration.NONE) ? 
ProxyOptions.fromConfiguration(buildConfiguration) : proxyOptions; if (executor != null) { httpClientBuilder.executor(executor); } if (buildProxyOptions != null) { httpClientBuilder = httpClientBuilder.proxy(new JdkHttpClientProxySelector( buildProxyOptions.getType().toProxyType(), buildProxyOptions.getAddress(), buildProxyOptions.getNonProxyHosts())); if (buildProxyOptions.getUsername() != null) { httpClientBuilder .authenticator(new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword())); } } return new JdkAsyncHttpClient(httpClientBuilder.build(), Collections.unmodifiableSet(getRestrictedHeaders())); } private Set<String> getAllowRestrictedHeaders() { Properties properties = getNetworkProperties(); String[] allowRestrictedHeadersNetProperties = properties.getProperty(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "").split(","); Configuration config = (this.configuration == null) ? Configuration.getGlobalConfiguration() : configuration; String[] allowRestrictedHeadersSystemProperties = config.get(JDK_HTTPCLIENT_ALLOW_RESTRICTED_HEADERS, "") .split(","); Set<String> allowRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersSystemProperties) .map(String::trim) .collect(Collectors.toSet())); allowRestrictedHeaders.addAll( Arrays.stream(allowRestrictedHeadersNetProperties) .map(String::trim) .collect(Collectors.toSet())); return allowRestrictedHeaders; } Properties getNetworkProperties() { Path path = Paths.get(JAVA_HOME, "conf", "net.properties"); Properties properties = new Properties(); try (Reader reader = Files.newBufferedReader(path)) { properties.load(reader); } catch (IOException e) { logger.warning("Cannot read net properties file at path {}", path, e); } return properties; } private static class ProxyAuthenticator extends Authenticator { private final String userName; private final String password; ProxyAuthenticator(String userName, String password) { 
this.userName = userName; this.password = password; } @Override protected PasswordAuthentication getPasswordAuthentication() { return new PasswordAuthentication(this.userName, password.toCharArray()); } } }
Will it be clear where the causes are? What identifies the causes if they want to find them quickly in the log?
/**
 * Sequentially attempts each credential in the chain and returns the first token obtained.
 * A credential that throws {@code CredentialUnavailableException} is recorded and skipped;
 * if every credential is unavailable, a summary exception is raised with the recorded
 * failures chained as its cause.
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the first successfully acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    // Collects the unavailable-exceptions in attempt order; 4 is a small default capacity.
    List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
    return Flux.fromIterable(credentials)
        .flatMap(p -> p.getToken(request)
            .doOnNext(t -> logger.info("Azure Identity => Attempted credential {} returns a token",
                p.getClass().getSimpleName()))
            // "Unavailable" means "try the next credential": record and complete empty.
            .onErrorResume(CredentialUnavailableException.class, t -> {
                exceptions.add(t);
                logger.info("Azure Identity => Attempted credential {} is unavailable.",
                    p.getClass().getSimpleName());
                return Mono.empty();
            }), 1)   // concurrency of 1 keeps the credential attempts strictly sequential
        .next()
        .switchIfEmpty(Mono.defer(() -> {
            // No credential produced a token: summarize the attempted credentials.
            // Fixed: the original wrapped this single concatenation in a StringBuilder that
            // was never appended to afterwards -- a plain String is equivalent and clearer.
            String message = "Tried "
                + credentials.stream().map(c -> c.getClass().getSimpleName())
                    .collect(Collectors.joining(", "))
                + " but failed to acquire a token for any of them. Please verify the"
                + " environment for the credentials"
                + " and see more details in the causes below.";
            // Chain the recorded exceptions so the earliest attempt is the outermost cause.
            CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
            for (int z = exceptions.size() - 2; z >= 0; z--) {
                CredentialUnavailableException current = exceptions.get(z);
                last = new CredentialUnavailableException(current.getMessage(), last);
            }
            return Mono.error(new CredentialUnavailableException(message, last));
        }));
}
+ " and see more details in the causes below.");
/**
 * Sequentially attempts each credential in the chain and returns the first token obtained.
 * A {@code CredentialUnavailableException} moves on to the next credential; any other
 * failure aborts the chain immediately as a {@code ClientAuthenticationException}. If every
 * credential is unavailable, the collected messages are merged into one exception.
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the first successfully acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
    return Flux.fromIterable(credentials)
        .flatMap(p -> p.getToken(request)
            .doOnNext(t -> logger.info("Azure Identity => Attempted credential {} returns a token",
                p.getClass().getSimpleName()))
            .onErrorResume(Exception.class, t -> {
                // Fixed: use instanceof instead of comparing getSimpleName() against the
                // string "CredentialUnavailableException" -- the string check matches any
                // class sharing that simple name, misses subclasses, breaks under
                // shading/renaming, and the cast below already assumes the type.
                if (!(t instanceof CredentialUnavailableException)) {
                    return Mono.error(new ClientAuthenticationException(
                        unavailableError + p.getClass().getSimpleName()
                        + " authentication failed. Error Details: " + t.getMessage(),
                        null, t));
                }
                exceptions.add((CredentialUnavailableException) t);
                logger.info("Azure Identity => Attempted credential {} is unavailable.",
                    p.getClass().getSimpleName());
                return Mono.empty();
            }), 1)   // concurrency of 1 keeps the credential attempts strictly sequential
        .next()
        .switchIfEmpty(Mono.defer(() -> {
            // All credentials unavailable: fold the messages (earliest first) into a
            // single CredentialUnavailableException.
            CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
            for (int z = exceptions.size() - 2; z >= 0; z--) {
                CredentialUnavailableException current = exceptions.get(z);
                last = new CredentialUnavailableException(current.getMessage()
                    + "\r\n" + last.getMessage(), last.getCause());
            }
            return Mono.error(last);
        }));
}
// Chains several TokenCredential implementations; they are attempted in iteration order
// until one of them can produce a token.
class ChainedTokenCredential implements TokenCredential {
    // Ordered credentials to try; the Deque's iteration order is the attempt order.
    private final Deque<TokenCredential> credentials;
    private final ClientLogger logger = new ClientLogger(getClass());

    /**
     * Create an instance of chained token credential that aggregates a list of token
     * credentials.
     */
    ChainedTokenCredential(Deque<TokenCredential> credentials) {
        this.credentials = credentials;
    }

    @Override
    // NOTE(review): the overriding method's body is not present in this snippet --
    // presumably elided by extraction; confirm against the full source file.
}
// Chains several TokenCredential implementations; they are attempted in list order
// until one of them can produce a token.
class ChainedTokenCredential implements TokenCredential {
    private final ClientLogger logger = new ClientLogger(getClass());
    // Read-only attempt order of credentials (wrapped unmodifiable in the constructor).
    private final List<TokenCredential> credentials;
    // Prefix used when reporting that a credential in the chain failed hard.
    private final String unavailableError = this.getClass().getSimpleName()
        + " authentication failed. ---> ";

    /**
     * Create an instance of chained token credential that aggregates a list of token
     * credentials.
     */
    ChainedTokenCredential(List<TokenCredential> credentials) {
        this.credentials = Collections.unmodifiableList(credentials);
    }

    @Override
    // NOTE(review): the body of the overridden method is elided in this snippet;
    // the annotation appears detached from the accessor below -- confirm in full source.
    /**
     * Get the read-only list of credentials sequentially used to attempt authentication.
     *
     * @return The list of {@link TokenCredential}.
     */
    public List<TokenCredential> getCredentials() {
        return credentials;
    }
}
yes, that's a common Javadoc that signals the Java version info.
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, 
formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, 
formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
// Async sample comparing custom-form recognition with labeled vs. unlabeled models.
class AdvancedDiffLabeledUnlabeledDataAsync {
    /**
     * Main method to invoke this demo.
     *
     * @param args Unused arguments to the program.
     *
     * @throws IOException Exception thrown when there is an error in reading all the bytes from the File.
     */
    // NOTE(review): the main method body itself is elided in this snippet.
}
// Async sample comparing custom-form recognition with labeled vs. unlabeled models.
class AdvancedDiffLabeledUnlabeledDataAsync {
    /**
     * Main method to invoke this demo.
     *
     * @param args Unused arguments to the program.
     *
     * @throws IOException Exception thrown when there is an error in reading all the bytes from the File.
     */
    // NOTE(review): the main method body itself is elided in this snippet.
}
This error message has been improved through this PR: https://github.com/Azure/azure-sdk-for-java/pull/9022
/**
 * Sequentially attempts each credential in the chain and returns the first token obtained.
 * A credential that throws {@code CredentialUnavailableException} is recorded and skipped;
 * if every credential is unavailable, a summary exception is raised with the recorded
 * failures chained as its cause.
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the first successfully acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    // Collects the unavailable-exceptions in attempt order; 4 is a small default capacity.
    List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
    return Flux.fromIterable(credentials)
        .flatMap(p -> p.getToken(request)
            .doOnNext(t -> logger.info("Azure Identity => Attempted credential {} returns a token",
                p.getClass().getSimpleName()))
            // "Unavailable" means "try the next credential": record and complete empty.
            .onErrorResume(CredentialUnavailableException.class, t -> {
                exceptions.add(t);
                logger.info("Azure Identity => Attempted credential {} is unavailable.",
                    p.getClass().getSimpleName());
                return Mono.empty();
            }), 1)   // concurrency of 1 keeps the credential attempts strictly sequential
        .next()
        .switchIfEmpty(Mono.defer(() -> {
            // No credential produced a token: summarize the attempted credentials.
            // Fixed: the original wrapped this single concatenation in a StringBuilder that
            // was never appended to afterwards -- a plain String is equivalent and clearer.
            String message = "Tried "
                + credentials.stream().map(c -> c.getClass().getSimpleName())
                    .collect(Collectors.joining(", "))
                + " but failed to acquire a token for any of them. Please verify the"
                + " environment for the credentials"
                + " and see more details in the causes below.";
            // Chain the recorded exceptions so the earliest attempt is the outermost cause.
            CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
            for (int z = exceptions.size() - 2; z >= 0; z--) {
                CredentialUnavailableException current = exceptions.get(z);
                last = new CredentialUnavailableException(current.getMessage(), last);
            }
            return Mono.error(new CredentialUnavailableException(message, last));
        }));
}
+ " and see more details in the causes below.");
/**
 * Sequentially attempts each credential in the chain and returns the first token obtained.
 * A {@code CredentialUnavailableException} moves on to the next credential; any other
 * failure aborts the chain immediately as a {@code ClientAuthenticationException}. If every
 * credential is unavailable, the collected messages are merged into one exception.
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the first successfully acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
    return Flux.fromIterable(credentials)
        .flatMap(p -> p.getToken(request)
            .doOnNext(t -> logger.info("Azure Identity => Attempted credential {} returns a token",
                p.getClass().getSimpleName()))
            .onErrorResume(Exception.class, t -> {
                // Fixed: use instanceof instead of comparing getSimpleName() against the
                // string "CredentialUnavailableException" -- the string check matches any
                // class sharing that simple name, misses subclasses, breaks under
                // shading/renaming, and the cast below already assumes the type.
                if (!(t instanceof CredentialUnavailableException)) {
                    return Mono.error(new ClientAuthenticationException(
                        unavailableError + p.getClass().getSimpleName()
                        + " authentication failed. Error Details: " + t.getMessage(),
                        null, t));
                }
                exceptions.add((CredentialUnavailableException) t);
                logger.info("Azure Identity => Attempted credential {} is unavailable.",
                    p.getClass().getSimpleName());
                return Mono.empty();
            }), 1)   // concurrency of 1 keeps the credential attempts strictly sequential
        .next()
        .switchIfEmpty(Mono.defer(() -> {
            // All credentials unavailable: fold the messages (earliest first) into a
            // single CredentialUnavailableException.
            CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
            for (int z = exceptions.size() - 2; z >= 0; z--) {
                CredentialUnavailableException current = exceptions.get(z);
                last = new CredentialUnavailableException(current.getMessage()
                    + "\r\n" + last.getMessage(), last.getCause());
            }
            return Mono.error(last);
        }));
}
// Chains several TokenCredential implementations; they are attempted in iteration order
// until one of them can produce a token.
class ChainedTokenCredential implements TokenCredential {
    // Ordered credentials to try; the Deque's iteration order is the attempt order.
    private final Deque<TokenCredential> credentials;
    private final ClientLogger logger = new ClientLogger(getClass());

    /**
     * Create an instance of chained token credential that aggregates a list of token
     * credentials.
     */
    ChainedTokenCredential(Deque<TokenCredential> credentials) {
        this.credentials = credentials;
    }

    @Override
    // NOTE(review): the overriding method's body is not present in this snippet --
    // presumably elided by extraction; confirm against the full source file.
}
// Chains several TokenCredential implementations; they are attempted in list order
// until one of them can produce a token.
class ChainedTokenCredential implements TokenCredential {
    private final ClientLogger logger = new ClientLogger(getClass());
    // Read-only attempt order of credentials (wrapped unmodifiable in the constructor).
    private final List<TokenCredential> credentials;
    // Prefix used when reporting that a credential in the chain failed hard.
    private final String unavailableError = this.getClass().getSimpleName()
        + " authentication failed. ---> ";

    /**
     * Create an instance of chained token credential that aggregates a list of token
     * credentials.
     */
    ChainedTokenCredential(List<TokenCredential> credentials) {
        this.credentials = Collections.unmodifiableList(credentials);
    }

    @Override
    // NOTE(review): the body of the overridden method is elided in this snippet;
    // the annotation appears detached from the accessor below -- confirm in full source.
    /**
     * Get the read-only list of credentials sequentially used to attempt authentication.
     *
     * @return The list of {@link TokenCredential}.
     */
    public List<TokenCredential> getCredentials() {
        return credentials;
    }
}
Can we use `if (!(t instanceof CredentialUnavailableException))` instead of comparing the class name as a string?
/**
 * Sequentially attempts each credential in the chain and returns the first token obtained.
 * A {@code CredentialUnavailableException} moves on to the next credential; any other
 * failure aborts the chain immediately as a {@code ClientAuthenticationException}. If every
 * credential is unavailable, the collected messages are merged into one exception.
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the first successfully acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
    return Flux.fromIterable(credentials)
        .flatMap(p -> p.getToken(request).onErrorResume(Exception.class, t -> {
            // Fixed: use instanceof instead of comparing getSimpleName() against the
            // string "CredentialUnavailableException" -- the string check matches any
            // class sharing that simple name, misses subclasses, breaks under
            // shading/renaming, and the cast below already assumes the type.
            if (!(t instanceof CredentialUnavailableException)) {
                return Mono.error(new ClientAuthenticationException(
                    unavailableError + p.getClass().getSimpleName()
                    + " authentication failed. Error Details: " + t.getMessage(),
                    null, t));
            }
            exceptions.add((CredentialUnavailableException) t);
            logger.info("Azure Identity => Attempted credential {} is unavailable.",
                p.getClass().getSimpleName());
            return Mono.empty();
        }).doOnNext(t -> logger.info("Azure Identity => Attempted credential {} returns a token",
            p.getClass().getSimpleName())), 1)   // concurrency 1 => strictly sequential
        .next()
        .switchIfEmpty(Mono.defer(() -> {
            // All credentials unavailable: fold the messages (earliest first) into a
            // single CredentialUnavailableException.
            CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
            for (int z = exceptions.size() - 2; z >= 0; z--) {
                CredentialUnavailableException current = exceptions.get(z);
                last = new CredentialUnavailableException(current.getMessage()
                    + "\r\n" + last.getMessage(), last.getCause());
            }
            return Mono.error(last);
        }));
}
if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
/**
 * Sequentially attempts each credential in the chain and returns the first token obtained.
 * A {@code CredentialUnavailableException} moves on to the next credential; any other
 * failure aborts the chain immediately as a {@code ClientAuthenticationException}. If every
 * credential is unavailable, the collected messages are merged into one exception.
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the first successfully acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
    return Flux.fromIterable(credentials)
        .flatMap(p -> p.getToken(request)
            .doOnNext(t -> logger.info("Azure Identity => Attempted credential {} returns a token",
                p.getClass().getSimpleName()))
            .onErrorResume(Exception.class, t -> {
                // Fixed: use instanceof instead of comparing getSimpleName() against the
                // string "CredentialUnavailableException" -- the string check matches any
                // class sharing that simple name, misses subclasses, breaks under
                // shading/renaming, and the cast below already assumes the type.
                if (!(t instanceof CredentialUnavailableException)) {
                    return Mono.error(new ClientAuthenticationException(
                        unavailableError + p.getClass().getSimpleName()
                        + " authentication failed. Error Details: " + t.getMessage(),
                        null, t));
                }
                exceptions.add((CredentialUnavailableException) t);
                logger.info("Azure Identity => Attempted credential {} is unavailable.",
                    p.getClass().getSimpleName());
                return Mono.empty();
            }), 1)   // concurrency of 1 keeps the credential attempts strictly sequential
        .next()
        .switchIfEmpty(Mono.defer(() -> {
            // All credentials unavailable: fold the messages (earliest first) into a
            // single CredentialUnavailableException.
            CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
            for (int z = exceptions.size() - 2; z >= 0; z--) {
                CredentialUnavailableException current = exceptions.get(z);
                last = new CredentialUnavailableException(current.getMessage()
                    + "\r\n" + last.getMessage(), last.getCause());
            }
            return Mono.error(last);
        }));
}
// Chains several TokenCredential implementations; they are attempted in iteration order
// until one of them can produce a token.
class ChainedTokenCredential implements TokenCredential {
    // Ordered credentials to try; the Deque's iteration order is the attempt order.
    private final Deque<TokenCredential> credentials;
    private final ClientLogger logger = new ClientLogger(getClass());
    // Prefix used when reporting that a credential in the chain failed hard.
    private final String unavailableError = this.getClass().getSimpleName()
        + " authentication failed. ---> ";

    /**
     * Create an instance of chained token credential that aggregates a list of token
     * credentials.
     */
    ChainedTokenCredential(Deque<TokenCredential> credentials) {
        this.credentials = credentials;
    }

    @Override
    // NOTE(review): the overriding method's body is not present in this snippet --
    // presumably elided by extraction; confirm against the full source file.
}
// Chains several TokenCredential implementations; they are attempted in list order
// until one of them can produce a token.
class ChainedTokenCredential implements TokenCredential {
    private final ClientLogger logger = new ClientLogger(getClass());
    // Read-only attempt order of credentials (wrapped unmodifiable in the constructor).
    private final List<TokenCredential> credentials;
    // Prefix used when reporting that a credential in the chain failed hard.
    private final String unavailableError = this.getClass().getSimpleName()
        + " authentication failed. ---> ";

    /**
     * Create an instance of chained token credential that aggregates a list of token
     * credentials.
     */
    ChainedTokenCredential(List<TokenCredential> credentials) {
        this.credentials = Collections.unmodifiableList(credentials);
    }

    @Override
    // NOTE(review): the body of the overridden method is elided in this snippet;
    // the annotation appears detached from the accessor below -- confirm in full source.
    /**
     * Get the read-only list of credentials sequentially used to attempt authentication.
     *
     * @return The list of {@link TokenCredential}.
     */
    public List<TokenCredential> getCredentials() {
        return credentials;
    }
}
Do we want to replace this with doOnSuccess? And should we also add doOnRequest?
/**
 * Acquires a token, first attempting the silent flow against the MSAL public-client
 * cache and falling back to redeeming the authorization code on a cache miss or failure.
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    return Mono.defer(() -> {
        // Silent path: only attempted when a previous authentication cached an account.
        if (cachedToken.get() != null) {
            return identityClient.authenticateWithPublicClientCache(request, cachedToken.get())
                // Any silent-flow failure falls through to the authorization-code flow.
                .onErrorResume(t -> Mono.empty());
        } else {
            return Mono.empty();
        }
    }).switchIfEmpty(
        // Cache miss (or silent failure): redeem the authorization code for a token.
        Mono.defer(() -> identityClient.authenticateWithAuthorizationCode(request, authCode, redirectUri)))
        .map(msalToken -> {
            // Remember the authenticated account so later calls can use the silent flow.
            cachedToken.set(new MsalAuthenticationAccount(
                new AuthenticationRecord(msalToken.getAuthenticationResult(),
                    identityClient.getTenantId())));
            return (AccessToken) msalToken;
        })
        .doOnNext(token -> LoggingUtil.logTokenSuccess(logger, request))
        .doOnError(error -> LoggingUtil.logTokenError(logger, request, error));
}
.doOnNext(token -> LoggingUtil.logTokenSuccess(logger, request))
/**
 * Acquires a token, first attempting the silent flow against the MSAL public-client
 * cache and falling back to redeeming the authorization code on a cache miss or failure.
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    return Mono.defer(() -> {
        // Silent path: only attempted when a previous authentication cached an account.
        if (cachedToken.get() != null) {
            return identityClient.authenticateWithPublicClientCache(request, cachedToken.get())
                // Any silent-flow failure falls through to the authorization-code flow.
                .onErrorResume(t -> Mono.empty());
        } else {
            return Mono.empty();
        }
    }).switchIfEmpty(
        // Cache miss (or silent failure): redeem the authorization code for a token.
        Mono.defer(() -> identityClient.authenticateWithAuthorizationCode(request, authCode, redirectUri)))
        .map(msalToken -> {
            // Remember the authenticated account so later calls can use the silent flow.
            cachedToken.set(new MsalAuthenticationAccount(
                new AuthenticationRecord(msalToken.getAuthenticationResult(),
                    identityClient.getTenantId())));
            return (AccessToken) msalToken;
        })
        .doOnNext(token -> LoggingUtil.logTokenSuccess(logger, request))
        .doOnError(error -> LoggingUtil.logTokenError(logger, request, error));
}
// Authenticates to Azure Active Directory by redeeming an OAuth 2.0 authorization code.
class AuthorizationCodeCredential implements TokenCredential {
    // The OAuth 2.0 authorization code grant to redeem.
    private final String authCode;
    // The redirect URI registered for the application.
    private final URI redirectUri;
    private final IdentityClient identityClient;
    // Caches the authenticated account after the first successful token acquisition.
    private final AtomicReference<MsalAuthenticationAccount> cachedToken;
    private final ClientLogger logger = new ClientLogger(AuthorizationCodeCredential.class);

    /**
     * Creates an AuthorizationCodeCredential with the given identity client options.
     *
     * @param clientId the client ID of the application
     * @param tenantId the tenant ID of the application
     * @param authCode the Oauth 2.0 authorization code grant
     * @param redirectUri the redirect URI used to authenticate to Azure Active Directory
     * @param identityClientOptions the options for configuring the identity client
     */
    AuthorizationCodeCredential(String clientId, String tenantId, String authCode, URI redirectUri,
        IdentityClientOptions identityClientOptions) {
        identityClient = new IdentityClientBuilder()
            .tenantId(tenantId)
            .clientId(clientId)
            .identityClientOptions(identityClientOptions)
            .build();
        this.cachedToken = new AtomicReference<>();
        this.authCode = authCode;
        this.redirectUri = redirectUri;
    }

    @Override
    // NOTE(review): the overriding method's body is elided in this snippet --
    // confirm against the full source file.
}
// Authenticates to Azure Active Directory by redeeming an OAuth 2.0 authorization code.
class AuthorizationCodeCredential implements TokenCredential {
    // The OAuth 2.0 authorization code grant to redeem.
    private final String authCode;
    // The redirect URI registered for the application.
    private final URI redirectUri;
    private final IdentityClient identityClient;
    // Caches the authenticated account after the first successful token acquisition.
    private final AtomicReference<MsalAuthenticationAccount> cachedToken;
    private final ClientLogger logger = new ClientLogger(AuthorizationCodeCredential.class);

    /**
     * Creates an AuthorizationCodeCredential with the given identity client options.
     *
     * @param clientId the client ID of the application
     * @param tenantId the tenant ID of the application
     * @param authCode the Oauth 2.0 authorization code grant
     * @param redirectUri the redirect URI used to authenticate to Azure Active Directory
     * @param identityClientOptions the options for configuring the identity client
     */
    AuthorizationCodeCredential(String clientId, String tenantId, String authCode, URI redirectUri,
        IdentityClientOptions identityClientOptions) {
        identityClient = new IdentityClientBuilder()
            .tenantId(tenantId)
            .clientId(clientId)
            .identityClientOptions(identityClientOptions)
            .build();
        this.cachedToken = new AtomicReference<>();
        this.authCode = authCode;
        this.redirectUri = redirectUri;
    }

    @Override
    // NOTE(review): the overriding method's body is elided in this snippet --
    // confirm against the full source file.
}
This can be misleading in cases when running on non-Azure hosted machines. As we default to VM credential if MSI endpoint is not set. Should we check if IMDS available before logging the environment ?
/**
 * Acquires a token from the managed-identity endpoint selected at construction time
 * (App Service MSI endpoint or the VM IMDS endpoint).
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    Mono<AccessToken> accessTokenMono;
    if (appServiceMSICredential != null) {
        // Fixed: the environment log was executed eagerly at assembly time, so it was
        // emitted on every call even when the Mono was never subscribed or the
        // authentication failed. Log inside doOnSuccess instead.
        accessTokenMono = appServiceMSICredential.authenticate(request)
            .doOnSuccess(t -> logger.info("Azure Identity => Managed Identity environment: MSI_ENDPOINT"));
    } else {
        accessTokenMono = virtualMachineMSICredential.authenticate(request)
            .doOnSuccess(t -> logger.info("Azure Identity => Managed Identity environment: IMDS"));
    }
    return accessTokenMono
        .doOnNext(token -> LoggingUtil.logTokenSuccess(logger, request))
        .doOnError(error -> LoggingUtil.logTokenError(logger, request, error));
}
logger.info("Azure Identity => Managed Identity environment: IMDS");
/**
 * Acquires a token from the managed-identity endpoint selected at construction time
 * (App Service MSI endpoint or the VM IMDS endpoint), logging the detected environment
 * only once authentication succeeds.
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    final Mono<AccessToken> tokenMono;
    if (appServiceMSICredential != null) {
        tokenMono = appServiceMSICredential.authenticate(request)
            .doOnSuccess(token ->
                logger.info("Azure Identity => Managed Identity environment: MSI_ENDPOINT"));
    } else {
        tokenMono = virtualMachineMSICredential.authenticate(request)
            .doOnSuccess(token ->
                logger.info("Azure Identity => Managed Identity environment: IMDS"));
    }
    return tokenMono
        .doOnNext(token -> LoggingUtil.logTokenSuccess(logger, request))
        .doOnError(error -> LoggingUtil.logTokenError(logger, request, error));
}
// Managed-identity credential that selects App Service MSI or VM IMDS at construction.
class ManagedIdentityCredential implements TokenCredential {
    // Exactly one of these two fields is non-null, chosen by the environment check below.
    private final AppServiceMsiCredential appServiceMSICredential;
    private final VirtualMachineMsiCredential virtualMachineMSICredential;
    private final ClientLogger logger = new ClientLogger(ManagedIdentityCredential.class);

    /**
     * Creates an instance of the ManagedIdentityCredential.
     * @param clientId the client id of user assigned or system assigned identity
     * @param identityClientOptions the options for configuring the identity client.
     */
    ManagedIdentityCredential(String clientId, IdentityClientOptions identityClientOptions) {
        IdentityClient identityClient = new IdentityClientBuilder()
            .clientId(clientId)
            .identityClientOptions(identityClientOptions)
            .build();
        Configuration configuration = Configuration.getGlobalConfiguration().clone();
        // MSI_ENDPOINT is set in App Service; otherwise fall back to the VM IMDS path.
        // NOTE(review): this defaults to IMDS even on non-Azure machines -- no availability
        // probe is performed here.
        if (configuration.contains(Configuration.PROPERTY_MSI_ENDPOINT)) {
            appServiceMSICredential = new AppServiceMsiCredential(clientId, identityClient);
            virtualMachineMSICredential = null;
        } else {
            virtualMachineMSICredential = new VirtualMachineMsiCredential(clientId, identityClient);
            appServiceMSICredential = null;
        }
    }

    /**
     * Gets the client ID of user assigned or system assigned identity.
     * @return the client ID of user assigned or system assigned identity.
     */
    public String getClientId() {
        return this.appServiceMSICredential != null
            ? this.appServiceMSICredential.getClientId()
            : this.virtualMachineMSICredential.getClientId();
    }

    @Override
    // NOTE(review): the overriding method's body is elided in this snippet.
}
// Managed-identity credential that selects App Service MSI or VM IMDS at construction.
class ManagedIdentityCredential implements TokenCredential {
    // Exactly one of these two fields is non-null, chosen by the environment check below.
    private final AppServiceMsiCredential appServiceMSICredential;
    private final VirtualMachineMsiCredential virtualMachineMSICredential;
    private final ClientLogger logger = new ClientLogger(ManagedIdentityCredential.class);

    /**
     * Creates an instance of the ManagedIdentityCredential.
     * @param clientId the client id of user assigned or system assigned identity
     * @param identityClientOptions the options for configuring the identity client.
     */
    ManagedIdentityCredential(String clientId, IdentityClientOptions identityClientOptions) {
        IdentityClient identityClient = new IdentityClientBuilder()
            .clientId(clientId)
            .identityClientOptions(identityClientOptions)
            .build();
        Configuration configuration = Configuration.getGlobalConfiguration().clone();
        // MSI_ENDPOINT is set in App Service; otherwise fall back to the VM IMDS path.
        // NOTE(review): this defaults to IMDS even on non-Azure machines -- no availability
        // probe is performed here.
        if (configuration.contains(Configuration.PROPERTY_MSI_ENDPOINT)) {
            appServiceMSICredential = new AppServiceMsiCredential(clientId, identityClient);
            virtualMachineMSICredential = null;
        } else {
            virtualMachineMSICredential = new VirtualMachineMsiCredential(clientId, identityClient);
            appServiceMSICredential = null;
        }
        // Emit the relevant environment variables at construction to aid troubleshooting.
        LoggingUtil.logAvailableEnvironmentVariables(logger, configuration);
    }

    /**
     * Gets the client ID of user assigned or system assigned identity.
     * @return the client ID of user assigned or system assigned identity.
     */
    public String getClientId() {
        return this.appServiceMSICredential != null
            ? this.appServiceMSICredential.getClientId()
            : this.virtualMachineMSICredential.getClientId();
    }

    @Override
    // NOTE(review): the overriding method's body is elided in this snippet.
}
I've moved this log to be only printed in doOnSuccess().
/**
 * Acquires a token from the managed-identity endpoint selected at construction time
 * (App Service MSI endpoint or the VM IMDS endpoint).
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    Mono<AccessToken> accessTokenMono;
    if (appServiceMSICredential != null) {
        // Fixed: the environment log was executed eagerly at assembly time, so it was
        // emitted on every call even when the Mono was never subscribed or the
        // authentication failed. Log inside doOnSuccess instead.
        accessTokenMono = appServiceMSICredential.authenticate(request)
            .doOnSuccess(t -> logger.info("Azure Identity => Managed Identity environment: MSI_ENDPOINT"));
    } else {
        accessTokenMono = virtualMachineMSICredential.authenticate(request)
            .doOnSuccess(t -> logger.info("Azure Identity => Managed Identity environment: IMDS"));
    }
    return accessTokenMono
        .doOnNext(token -> LoggingUtil.logTokenSuccess(logger, request))
        .doOnError(error -> LoggingUtil.logTokenError(logger, request, error));
}
logger.info("Azure Identity => Managed Identity environment: IMDS");
/**
 * Acquires a token from the managed-identity endpoint selected at construction time
 * (App Service MSI endpoint or the VM IMDS endpoint), logging the detected environment
 * only once authentication succeeds.
 *
 * @param request the details of the token being requested
 * @return a Mono emitting the acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    final Mono<AccessToken> tokenMono;
    if (appServiceMSICredential != null) {
        tokenMono = appServiceMSICredential.authenticate(request)
            .doOnSuccess(token ->
                logger.info("Azure Identity => Managed Identity environment: MSI_ENDPOINT"));
    } else {
        tokenMono = virtualMachineMSICredential.authenticate(request)
            .doOnSuccess(token ->
                logger.info("Azure Identity => Managed Identity environment: IMDS"));
    }
    return tokenMono
        .doOnNext(token -> LoggingUtil.logTokenSuccess(logger, request))
        .doOnError(error -> LoggingUtil.logTokenError(logger, request, error));
}
// Managed-identity credential that selects App Service MSI or VM IMDS at construction.
class ManagedIdentityCredential implements TokenCredential {
    // Exactly one of these two fields is non-null, chosen by the environment check below.
    private final AppServiceMsiCredential appServiceMSICredential;
    private final VirtualMachineMsiCredential virtualMachineMSICredential;
    private final ClientLogger logger = new ClientLogger(ManagedIdentityCredential.class);

    /**
     * Creates an instance of the ManagedIdentityCredential.
     * @param clientId the client id of user assigned or system assigned identity
     * @param identityClientOptions the options for configuring the identity client.
     */
    ManagedIdentityCredential(String clientId, IdentityClientOptions identityClientOptions) {
        IdentityClient identityClient = new IdentityClientBuilder()
            .clientId(clientId)
            .identityClientOptions(identityClientOptions)
            .build();
        Configuration configuration = Configuration.getGlobalConfiguration().clone();
        // MSI_ENDPOINT is set in App Service; otherwise fall back to the VM IMDS path.
        // NOTE(review): this defaults to IMDS even on non-Azure machines -- no availability
        // probe is performed here.
        if (configuration.contains(Configuration.PROPERTY_MSI_ENDPOINT)) {
            appServiceMSICredential = new AppServiceMsiCredential(clientId, identityClient);
            virtualMachineMSICredential = null;
        } else {
            virtualMachineMSICredential = new VirtualMachineMsiCredential(clientId, identityClient);
            appServiceMSICredential = null;
        }
    }

    /**
     * Gets the client ID of user assigned or system assigned identity.
     * @return the client ID of user assigned or system assigned identity.
     */
    public String getClientId() {
        return this.appServiceMSICredential != null
            ? this.appServiceMSICredential.getClientId()
            : this.virtualMachineMSICredential.getClientId();
    }

    @Override
    // NOTE(review): the overriding method's body is elided in this snippet.
}
class ManagedIdentityCredential implements TokenCredential { private final AppServiceMsiCredential appServiceMSICredential; private final VirtualMachineMsiCredential virtualMachineMSICredential; private final ClientLogger logger = new ClientLogger(ManagedIdentityCredential.class); /** * Creates an instance of the ManagedIdentityCredential. * @param clientId the client id of user assigned or system assigned identity * @param identityClientOptions the options for configuring the identity client. */ ManagedIdentityCredential(String clientId, IdentityClientOptions identityClientOptions) { IdentityClient identityClient = new IdentityClientBuilder() .clientId(clientId) .identityClientOptions(identityClientOptions) .build(); Configuration configuration = Configuration.getGlobalConfiguration().clone(); if (configuration.contains(Configuration.PROPERTY_MSI_ENDPOINT)) { appServiceMSICredential = new AppServiceMsiCredential(clientId, identityClient); virtualMachineMSICredential = null; } else { virtualMachineMSICredential = new VirtualMachineMsiCredential(clientId, identityClient); appServiceMSICredential = null; } LoggingUtil.logAvailableEnvironmentVariables(logger, configuration); } /** * Gets the client ID of user assigned or system assigned identity. * @return the client ID of user assigned or system assigned identity. */ public String getClientId() { return this.appServiceMSICredential != null ? this.appServiceMSICredential.getClientId() : this.virtualMachineMSICredential.getClientId(); } @Override }
I changed all of these to use doOnSuccess, since we don't log anything in doOnRequest().
/**
 * Acquires an access token, first attempting silent authentication against the
 * cached account and falling back to redeeming the authorization code. The
 * authenticated account is cached for subsequent silent attempts.
 *
 * @param request the token request describing the scopes to authenticate for
 * @return a Mono emitting the acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    return Mono.defer(() -> {
        // Silent path: only consult the public client cache when an account was stored;
        // any silent-auth error falls through (empty Mono) to the auth-code flow.
        if (cachedToken.get() == null) {
            return Mono.empty();
        }
        return identityClient.authenticateWithPublicClientCache(request, cachedToken.get())
            .onErrorResume(ignored -> Mono.empty());
    })
        .switchIfEmpty(Mono.defer(() ->
            identityClient.authenticateWithAuthorizationCode(request, authCode, redirectUri)))
        .map(msalToken -> {
            // Remember the account so future calls can authenticate silently.
            cachedToken.set(new MsalAuthenticationAccount(
                new AuthenticationRecord(msalToken.getAuthenticationResult(),
                    identityClient.getTenantId())));
            return (AccessToken) msalToken;
        })
        .doOnNext(accessToken -> LoggingUtil.logTokenSuccess(logger, request))
        .doOnError(throwable -> LoggingUtil.logTokenError(logger, request, throwable));
}
.doOnNext(token -> LoggingUtil.logTokenSuccess(logger, request))
/**
 * Acquires an access token, first attempting silent authentication with the
 * cached account and falling back to redeeming the authorization code grant.
 *
 * @param request the token request describing the scopes to authenticate for
 * @return a Mono emitting the acquired {@link AccessToken}
 */
public Mono<AccessToken> getToken(TokenRequestContext request) {
    // Silent path: try the public client cache when an account was stored; any
    // silent-auth error resumes with an empty Mono so we fall through below.
    return Mono.defer(() -> {
        if (cachedToken.get() != null) {
            return identityClient.authenticateWithPublicClientCache(request, cachedToken.get())
                .onErrorResume(t -> Mono.empty());
        } else {
            return Mono.empty();
        }
    }).switchIfEmpty(
        // No cached account (or silent auth failed): redeem the authorization code.
        Mono.defer(() -> identityClient.authenticateWithAuthorizationCode(request, authCode, redirectUri)))
        .map(msalToken -> {
            // Cache the authenticated account so subsequent calls can use silent auth.
            cachedToken.set(new MsalAuthenticationAccount(
                new AuthenticationRecord(msalToken.getAuthenticationResult(),
                    identityClient.getTenantId())));
            return (AccessToken) msalToken;
        })
        .doOnNext(token -> LoggingUtil.logTokenSuccess(logger, request))
        .doOnError(error -> LoggingUtil.logTokenError(logger, request, error));
}
class AuthorizationCodeCredential implements TokenCredential { private final String authCode; private final URI redirectUri; private final IdentityClient identityClient; private final AtomicReference<MsalAuthenticationAccount> cachedToken; private final ClientLogger logger = new ClientLogger(AuthorizationCodeCredential.class); /** * Creates an AuthorizationCodeCredential with the given identity client options. * * @param clientId the client ID of the application * @param tenantId the tenant ID of the application * @param authCode the Oauth 2.0 authorization code grant * @param redirectUri the redirect URI used to authenticate to Azure Active Directory * @param identityClientOptions the options for configuring the identity client */ AuthorizationCodeCredential(String clientId, String tenantId, String authCode, URI redirectUri, IdentityClientOptions identityClientOptions) { identityClient = new IdentityClientBuilder() .tenantId(tenantId) .clientId(clientId) .identityClientOptions(identityClientOptions) .build(); this.cachedToken = new AtomicReference<>(); this.authCode = authCode; this.redirectUri = redirectUri; } @Override }
class AuthorizationCodeCredential implements TokenCredential { private final String authCode; private final URI redirectUri; private final IdentityClient identityClient; private final AtomicReference<MsalAuthenticationAccount> cachedToken; private final ClientLogger logger = new ClientLogger(AuthorizationCodeCredential.class); /** * Creates an AuthorizationCodeCredential with the given identity client options. * * @param clientId the client ID of the application * @param tenantId the tenant ID of the application * @param authCode the Oauth 2.0 authorization code grant * @param redirectUri the redirect URI used to authenticate to Azure Active Directory * @param identityClientOptions the options for configuring the identity client */ AuthorizationCodeCredential(String clientId, String tenantId, String authCode, URI redirectUri, IdentityClientOptions identityClientOptions) { identityClient = new IdentityClientBuilder() .tenantId(tenantId) .clientId(clientId) .identityClientOptions(identityClientOptions) .build(); this.cachedToken = new AtomicReference<>(); this.authCode = authCode; this.redirectUri = redirectUri; } @Override }
Same here: the timeout should be an optional method parameter. You can still fall back to a default when it isn't set.
/**
 * Resolves the service-assigned ID for the given schema string, serving from the
 * local cache when possible and caching the result after a service round trip.
 *
 * @param schemaGroup the schema group the schema is registered under
 * @param schemaName the name of the schema
 * @param schemaString the textual schema content used as the cache key
 * @param schemaType the serialization type of the schema
 * @return the schema ID assigned by the registry
 * @throws SchemaRegistryClientException if the service call fails, returns a null
 *         response, or reports that no matching schema exists (404)
 */
public String getSchemaId(String schemaGroup, String schemaName, String schemaString, String schemaType) {
    // Cache hit: putIfAbsent never stores null, so a non-null lookup means present.
    final SchemaRegistryObject cachedObject = schemaStringCache.get(schemaString);
    if (cachedObject != null) {
        logger.verbose("Cache hit schema string. Group: '{}', name: '{}'", schemaGroup, schemaName);
        return cachedObject.getSchemaId();
    }

    final GetIdBySchemaContentResponse idResponse;
    try {
        idResponse = this.restService
            .getIdBySchemaContentWithResponseAsync(schemaGroup, schemaName, schemaType, schemaString)
            .block(HTTP_REQUEST_TIMEOUT);
    } catch (HttpResponseException e) {
        throw logger.logExceptionAsError(new SchemaRegistryClientException(
            String.format(
                "Failed to fetch ID for schema, unexpected service response. Group: '%s', name: '%s'",
                schemaGroup, schemaName),
            e));
    }

    if (idResponse == null) {
        throw logger.logExceptionAsError(new SchemaRegistryClientException("Client returned null response"));
    }
    if (idResponse.getStatusCode() == 404) {
        throw logger.logExceptionAsError(new SchemaRegistryClientException("Existing matching schema not found."));
    }

    final SchemaId schemaId = idResponse.getValue();
    // Evict stale entries before inserting, then cache the resolved schema object.
    resetIfNeeded();
    schemaStringCache.putIfAbsent(schemaString, new SchemaRegistryObject(
        schemaId.getId(),
        schemaType,
        schemaString.getBytes(SCHEMA_REGISTRY_SERVICE_ENCODING),
        getParseFunc(schemaType)));
    logger.verbose("Cached schema string. Group: '{}', name: '{}'", schemaGroup, schemaName);
    return schemaId.getId();
}
.block(HTTP_REQUEST_TIMEOUT);
/**
 * Resolves the service-assigned ID for the given schema string, serving from the
 * local cache when possible and caching the result after a service round trip.
 *
 * @param schemaGroup the schema group the schema is registered under
 * @param schemaName the name of the schema
 * @param schemaString the textual schema content used as the cache key
 * @param schemaType the serialization type of the schema
 * @return the schema ID assigned by the registry
 * @throws SchemaRegistryClientException if the service call fails, returns a null
 *         response, or reports that no matching schema exists (404)
 */
public String getSchemaId(String schemaGroup, String schemaName, String schemaString, String schemaType) {
    // Serve from the local cache keyed on the exact schema string.
    if (schemaStringCache.containsKey(schemaString)) {
        logger.verbose("Cache hit schema string. Group: '{}', name: '{}'", schemaGroup, schemaName);
        return schemaStringCache.get(schemaString).getSchemaId();
    }

    GetIdBySchemaContentResponse response;
    try {
        // Synchronous call; blocks up to HTTP_REQUEST_TIMEOUT.
        response = this.restService
            .getIdBySchemaContentWithResponseAsync(schemaGroup, schemaName, schemaType, schemaString)
            .block(HTTP_REQUEST_TIMEOUT);
    } catch (HttpResponseException e) {
        throw logger.logExceptionAsError(new SchemaRegistryClientException(
            String.format(
                "Failed to fetch ID for schema, unexpected service response. Group: '%s', name: '%s'",
                schemaGroup, schemaName),
            e));
    }

    if (response == null) {
        throw logger.logExceptionAsError(new SchemaRegistryClientException("Client returned null response"));
    }

    // 404 indicates no registered schema matches the supplied content.
    if (response.getStatusCode() == 404) {
        throw logger.logExceptionAsError(new SchemaRegistryClientException("Existing matching schema not found."));
    }

    SchemaId schemaId = response.getValue();
    // resetIfNeeded() presumably bounds cache growth before inserting — TODO confirm.
    resetIfNeeded();
    schemaStringCache.putIfAbsent(
        schemaString,
        new SchemaRegistryObject(
            schemaId.getId(),
            schemaType,
            schemaString.getBytes(SCHEMA_REGISTRY_SERVICE_ENCODING),
            getParseFunc(schemaType)));
    logger.verbose("Cached schema string. Group: '{}', name: '{}'", schemaGroup, schemaName);
    return schemaId.getId();
}
class implementation * @throws IllegalArgumentException on bad schema type or if parser for schema type has already been registered */ public void addSchemaParser(Codec codec) { if (CoreUtils.isNullOrEmpty(codec.schemaType())) { throw logger.logExceptionAsError( new IllegalArgumentException("Serialization type cannot be null or empty.")); } if (this.typeParserMap.containsKey(codec.schemaType())) { throw logger.logExceptionAsError( new IllegalArgumentException("Multiple parse methods for single serialization type may not be added.")); } this.typeParserMap.putIfAbsent(codec.schemaType(), codec::parseSchemaString); logger.verbose( "Loaded parser for '{}' serialization format.", codec.schemaType().toLowerCase(Locale.ROOT)); }
class implementation * @throws IllegalArgumentException on bad schema type or if parser for schema type has already been registered */ public void addSchemaParser(Codec codec) { if (CoreUtils.isNullOrEmpty(codec.schemaType())) { throw logger.logExceptionAsError( new IllegalArgumentException("Serialization type cannot be null or empty.")); } if (this.typeParserMap.containsKey(codec.schemaType())) { throw logger.logExceptionAsError( new IllegalArgumentException("Multiple parse methods for single serialization type may not be added.")); } this.typeParserMap.putIfAbsent(codec.schemaType(), codec::parseSchemaString); logger.verbose( "Loaded parser for '{}' serialization format.", codec.schemaType().toLowerCase(Locale.ROOT)); }
Mutating input parameters is generally considered bad practice (see https://softwareengineering.stackexchange.com/questions/245767/is-modifying-an-incoming-parameter-an-antipattern). It looks like we make a copy a few lines below anyway, so we can use that instead of mutating the caller's options.
/**
 * Uploads the data described by {@code options} as a new block blob, encrypting the
 * content and recording the encryption metadata before delegating to the base client.
 *
 * @param options the upload options; must not be null. The caller's options object is
 *        not mutated — a fresh {@link BlobParallelUploadOptions} is built for the
 *        delegated call.
 * @return a reactive response containing the information of the uploaded block blob
 */
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) {
    try {
        Objects.requireNonNull(options);
        // Use a local metadata map rather than writing it back into the caller's
        // options (previously options.setMetadata(...) mutated the input parameter).
        // NOTE(review): when options already carries metadata, that same map is still
        // handed to prepareToSendEncryptedRequest, which adds the encryption-data
        // entry to it — confirm whether a defensive copy is desired here.
        final Map<String, String> metadataFinal = options.getMetadata() == null
            ? new HashMap<>() : options.getMetadata();
        // Prefer an already-provided Flux; otherwise chunk the stream into buffers.
        Flux<ByteBuffer> data = options.getDataFlux() == null
            ? Utility.convertStreamToByteBuffer(
                options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE)
            : options.getDataFlux();
        // Encrypt the payload; the encryption metadata is written into metadataFinal.
        Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal);
        return dataFinal.flatMap(df -> super.uploadWithResponse(new BlobParallelUploadOptions(df)
            .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
            .setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier())
            .setRequestConditions(options.getRequestConditions()).setTimeout(options.getTimeout())));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
options.setMetadata(metadataFinal);
new BlobParallelUploadOptions(df) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions()))); } catch (RuntimeException ex) { return monoError(logger, ex); }
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(AccessTier.HOT).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { Objects.requireNonNull(options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); options.setMetadata(metadataFinal); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { try { StorageImplUtils.assertNotNull("options", options); return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel)) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())) .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }), channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Encrypts the given Flux ByteBuffer. * * @param plainTextFlux The Flux ByteBuffer to be encrypted. 
* * @return A {@link EncryptedBlob} * * @throws InvalidKeyException If the key provided is invalid */ Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException { Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null"); try { KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES); keyGen.init(256); Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING); SecretKey aesKey = keyGen.generateKey(); cipher.init(Cipher.ENCRYPT_MODE, aesKey); Map<String, String> keyWrappingMetadata = new HashMap<>(); keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY, CryptographyConstants.AGENT_METADATA_VALUE); return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded()) .map(encryptedKey -> { WrappedKey wrappedKey = new WrappedKey( this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm); EncryptionData encryptionData = new EncryptionData() .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE) .setEncryptionAgent( new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256)) .setKeyWrappingMetadata(keyWrappingMetadata) .setContentEncryptionIV(cipher.getIV()) .setWrappedContentKey(wrappedKey); Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> { int outputSize = cipher.getOutputSize(plainTextBuffer.remaining()); /* This should be the only place we allocate memory in encryptBlob(). Although there is an overload that can encrypt in place that would save allocations, we do not want to overwrite customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable, we should implement pooling. 
*/ ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize); int encryptedBytes; try { encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer); } catch (ShortBufferException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } encryptedTextBuffer.position(0); encryptedTextBuffer.limit(encryptedBytes); return encryptedTextBuffer; }); /* Defer() ensures the contained code is not executed until the Flux is subscribed to, in other words, cipher.doFinal() will not be called until the plainTextFlux has completed and therefore all other data has been encrypted. */ encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> { try { return Flux.just(ByteBuffer.wrap(cipher.doFinal())); } catch (GeneralSecurityException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } })); return new EncryptedBlob(encryptionData, encryptedTextFlux); }); } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Encrypt the blob and add the encryption metadata to the customer's metadata. * * @param plainText The data to encrypt * @param metadata The customer's metadata to be updated. * * @return A Mono containing the cipher text */ private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) { try { return this.encryptBlob(plainText) .flatMap(encryptedBlob -> { try { metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptedBlob.getEncryptionData().toJsonString()); return Mono.just(encryptedBlob.getCiphertextFlux()); } catch (JsonProcessingException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }); } catch (InvalidKeyException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } /** * Unsupported. Cannot query data encrypted on client side. 
*/ @Override public Flux<ByteBuffer> query(String expression) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } /** * Unsupported. Cannot query data encrypted on client side. */ @Override public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } }
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(AccessTier.HOT).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { try { StorageImplUtils.assertNotNull("options", options); return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel)) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())) .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }), channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Encrypts the given Flux ByteBuffer. * * @param plainTextFlux The Flux ByteBuffer to be encrypted. 
* * @return A {@link EncryptedBlob} * * @throws InvalidKeyException If the key provided is invalid */ Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException { Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null"); try { KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES); keyGen.init(256); Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING); SecretKey aesKey = keyGen.generateKey(); cipher.init(Cipher.ENCRYPT_MODE, aesKey); Map<String, String> keyWrappingMetadata = new HashMap<>(); keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY, CryptographyConstants.AGENT_METADATA_VALUE); return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded()) .map(encryptedKey -> { WrappedKey wrappedKey = new WrappedKey( this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm); EncryptionData encryptionData = new EncryptionData() .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE) .setEncryptionAgent( new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256)) .setKeyWrappingMetadata(keyWrappingMetadata) .setContentEncryptionIV(cipher.getIV()) .setWrappedContentKey(wrappedKey); Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> { int outputSize = cipher.getOutputSize(plainTextBuffer.remaining()); /* This should be the only place we allocate memory in encryptBlob(). Although there is an overload that can encrypt in place that would save allocations, we do not want to overwrite customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable, we should implement pooling. 
*/ ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize); int encryptedBytes; try { encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer); } catch (ShortBufferException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } encryptedTextBuffer.position(0); encryptedTextBuffer.limit(encryptedBytes); return encryptedTextBuffer; }); /* Defer() ensures the contained code is not executed until the Flux is subscribed to, in other words, cipher.doFinal() will not be called until the plainTextFlux has completed and therefore all other data has been encrypted. */ encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> { try { return Flux.just(ByteBuffer.wrap(cipher.doFinal())); } catch (GeneralSecurityException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } })); return new EncryptedBlob(encryptionData, encryptedTextFlux); }); } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Encrypt the blob and add the encryption metadata to the customer's metadata. * * @param plainText The data to encrypt * @param metadata The customer's metadata to be updated. * * @return A Mono containing the cipher text */ private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) { try { return this.encryptBlob(plainText) .flatMap(encryptedBlob -> { try { metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptedBlob.getEncryptionData().toJsonString()); return Mono.just(encryptedBlob.getCiphertextFlux()); } catch (JsonProcessingException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }); } catch (InvalidKeyException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } /** * Unsupported. Cannot query data encrypted on client side. 
*/ @Override public Flux<ByteBuffer> query(String expression) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } /** * Unsupported. Cannot query data encrypted on client side. */ @Override public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } }
I don't have to set it on the options; using a local variable instead would avoid mutating the caller's options object — is that preferable?
/**
 * Creates a new block blob, or updates the content of an existing block blob. The blob content is
 * encrypted client-side before upload, and the resulting encryption metadata is added to the blob's
 * metadata (via {@code prepareToSendEncryptedRequest}) so it can be decrypted on download.
 *
 * @param options {@link BlobParallelUploadOptions} describing the data and upload configuration.
 * @return A reactive response containing the information of the uploaded block blob.
 * @throws NullPointerException If {@code options} is null.
 */
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) {
    try {
        Objects.requireNonNull(options, "'options' cannot be null.");
        /*
         * Use a local metadata map rather than writing it back onto the caller's options object.
         * The previous implementation called options.setMetadata(metadataFinal), mutating the
         * input parameter as a side effect.
         */
        final Map<String, String> metadataFinal = options.getMetadata() == null
            ? new HashMap<>() : options.getMetadata();
        Flux<ByteBuffer> data = options.getDataFlux() == null
            ? Utility.convertStreamToByteBuffer(
                options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE)
            : options.getDataFlux();
        /*
         * Encrypting also inserts the encryption-data entry into metadataFinal.
         * NOTE(review): when the caller supplied a metadata map it is reused here and gains that
         * entry, matching the original behavior — confirm this sharing is intended.
         */
        Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal);
        return dataFinal.flatMap(df -> super.uploadWithResponse(new BlobParallelUploadOptions(df)
            .setParallelTransferOptions(options.getParallelTransferOptions())
            .setHeaders(options.getHeaders())
            .setMetadata(metadataFinal)
            .setTags(options.getTags())
            .setTier(options.getTier())
            .setRequestConditions(options.getRequestConditions())
            .setTimeout(options.getTimeout())));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
options.setMetadata(metadataFinal);
new BlobParallelUploadOptions(df) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions()))); } catch (RuntimeException ex) { return monoError(logger, ex); }
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(AccessTier.HOT).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { Objects.requireNonNull(options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); options.setMetadata(metadataFinal); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
     * @param options {@link BlobUploadFromFileOptions}
     * @return A reactive response containing the information of the uploaded block blob.
     * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB
     * @throws UncheckedIOException If an I/O error occurs
     */
    @Override
    public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) {
        try {
            StorageImplUtils.assertNotNull("options", options);
            // Mono.using ties the file channel's lifetime to the subscription; the third argument
            // cleans up the resource if the pipeline is cancelled or errors.
            return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger),
                channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel))
                    .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
                    .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier())
                    .setRequestConditions(options.getRequestConditions()))
                    .doOnTerminate(() -> {
                        try {
                            // Close on success or error; wrap as unchecked because doOnTerminate
                            // cannot throw checked exceptions.
                            channel.close();
                        } catch (IOException e) {
                            throw logger.logExceptionAsError(new UncheckedIOException(e));
                        }
                    }), channel -> UploadUtils.uploadFileCleanup(channel, logger));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono rather than throwing.
            return monoError(logger, ex);
        }
    }

    /**
     * Encrypts the given Flux of ByteBuffers: generates a fresh AES-256 content key, encrypts the data with
     * AES/CBC/PKCS5Padding, and wraps the content key with the configured {@link AsyncKeyEncryptionKey}.
     *
     * @param plainTextFlux The Flux ByteBuffer to be encrypted.
     * @return A {@link EncryptedBlob} carrying the encryption metadata and the cipher-text flux.
     * @throws InvalidKeyException If the key provided is invalid
     */
    Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException {
        Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null");
        try {
            KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES);
            keyGen.init(256);
            Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING);
            SecretKey aesKey = keyGen.generateKey();
            cipher.init(Cipher.ENCRYPT_MODE, aesKey);
            Map<String, String> keyWrappingMetadata = new HashMap<>();
            keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY,
                CryptographyConstants.AGENT_METADATA_VALUE);
            return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded())
                .map(encryptedKey -> {
                    // NOTE(review): getKeyId().block() is a blocking call inside a reactive operator.
                    // Consider zipping getKeyId() with wrapKey() instead — confirm whether this key
                    // type resolves its id immediately.
                    WrappedKey wrappedKey = new WrappedKey(
                        this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm);
                    EncryptionData encryptionData = new EncryptionData()
                        .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE)
                        .setEncryptionAgent(
                            new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1,
                                EncryptionAlgorithm.AES_CBC_256))
                        .setKeyWrappingMetadata(keyWrappingMetadata)
                        .setContentEncryptionIV(cipher.getIV())
                        .setWrappedContentKey(wrappedKey);
                    Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> {
                        int outputSize = cipher.getOutputSize(plainTextBuffer.remaining());
                        /*
                        This should be the only place we allocate memory in encryptBlob(). Although an
                        in-place overload exists, we must not overwrite the customer's memory, so we
                        allocate our own buffer. Implement pooling if usage becomes unreasonable.
                        */
                        ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize);
                        int encryptedBytes;
                        try {
                            encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer);
                        } catch (ShortBufferException e) {
                            throw logger.logExceptionAsError(Exceptions.propagate(e));
                        }
                        // Flip the buffer to expose only the bytes the cipher produced.
                        encryptedTextBuffer.position(0);
                        encryptedTextBuffer.limit(encryptedBytes);
                        return encryptedTextBuffer;
                    });
                    /*
                    Defer() ensures cipher.doFinal() is not called until the plainTextFlux has
                    completed and therefore all other data has been encrypted.
                    */
                    encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> {
                        try {
                            return Flux.just(ByteBuffer.wrap(cipher.doFinal()));
                        } catch (GeneralSecurityException e) {
                            throw logger.logExceptionAsError(Exceptions.propagate(e));
                        }
                    }));
                    return new EncryptedBlob(encryptionData, encryptedTextFlux);
                });
        } catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
            throw logger.logExceptionAsError(new RuntimeException(e));
        }
    }

    /**
     * Encrypts the blob and records the serialized encryption metadata into the caller's metadata map.
     *
     * @param plainText The data to encrypt
     * @param metadata The customer's metadata to be updated; the encryption data is stored under
     * {@code CryptographyConstants.ENCRYPTION_DATA_KEY}.
     * @return A Mono containing the cipher text
     */
    private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText,
        Map<String, String> metadata) {
        try {
            return this.encryptBlob(plainText)
                .flatMap(encryptedBlob -> {
                    try {
                        metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY,
                            encryptedBlob.getEncryptionData().toJsonString());
                        return Mono.just(encryptedBlob.getCiphertextFlux());
                    } catch (JsonProcessingException e) {
                        throw logger.logExceptionAsError(Exceptions.propagate(e));
                    }
                });
        } catch (InvalidKeyException e) {
            throw logger.logExceptionAsError(Exceptions.propagate(e));
        }
    }

    /**
     * Unsupported. Cannot query data encrypted on client side.
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public Flux<ByteBuffer> query(String expression) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Cannot query data encrypted on client side"));
    }

    /**
     * Unsupported. Cannot query data encrypted on client side.
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Cannot query data encrypted on client side"));
    }
}
class EncryptedBlobAsyncClient extends BlobAsyncClient {
    // Chunk size used when converting an InputStream into a Flux of ByteBuffers for upload.
    static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB;
    // Maximum size of a single staged block; declared long to avoid int overflow (4000 * MB > Integer.MAX_VALUE).
    private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB;

    private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class);

    /**
     * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption.
     */
    private final AsyncKeyEncryptionKey keyWrapper;

    /**
     * A {@link String} that is used to wrap/unwrap the content key during encryption.
     */
    private final String keyWrapAlgorithm;

    /**
     * Package-private constructor for use by {@link EncryptedBlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     * @param key The key used to encrypt and decrypt data.
     * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption.
     * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest version.
     */
    EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
        String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
        AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) {
        // NOTE(review): the explicit 'null' presumably fills the encryption-scope slot of the parent
        // constructor — confirm against BlobAsyncClient's constructor signature.
        super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey,
            null, versionId);
        this.keyWrapper = key;
        this.keyWrapAlgorithm = keyWrapAlgorithm;
    }

    /**
     * Creates a new block blob. By default this method will not overwrite an existing blob.
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(AccessTier.HOT).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
     * @param options {@link BlobUploadFromFileOptions}
     * @return A reactive response containing the information of the uploaded block blob.
     * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB
     * @throws UncheckedIOException If an I/O error occurs
     */
    @Override
    public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) {
        try {
            StorageImplUtils.assertNotNull("options", options);
            // Mono.using ties the file channel's lifetime to the subscription; the third argument
            // cleans up the resource if the pipeline is cancelled or errors.
            return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger),
                channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel))
                    .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
                    .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier())
                    .setRequestConditions(options.getRequestConditions()))
                    .doOnTerminate(() -> {
                        try {
                            // Close on success or error; wrap as unchecked because doOnTerminate
                            // cannot throw checked exceptions.
                            channel.close();
                        } catch (IOException e) {
                            throw logger.logExceptionAsError(new UncheckedIOException(e));
                        }
                    }), channel -> UploadUtils.uploadFileCleanup(channel, logger));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono rather than throwing.
            return monoError(logger, ex);
        }
    }

    /**
     * Encrypts the given Flux of ByteBuffers: generates a fresh AES-256 content key, encrypts the data with
     * AES/CBC/PKCS5Padding, and wraps the content key with the configured {@link AsyncKeyEncryptionKey}.
     *
     * @param plainTextFlux The Flux ByteBuffer to be encrypted.
     * @return A {@link EncryptedBlob} carrying the encryption metadata and the cipher-text flux.
     * @throws InvalidKeyException If the key provided is invalid
     */
    Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException {
        Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null");
        try {
            KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES);
            keyGen.init(256);
            Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING);
            SecretKey aesKey = keyGen.generateKey();
            cipher.init(Cipher.ENCRYPT_MODE, aesKey);
            Map<String, String> keyWrappingMetadata = new HashMap<>();
            keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY,
                CryptographyConstants.AGENT_METADATA_VALUE);
            return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded())
                .map(encryptedKey -> {
                    // NOTE(review): getKeyId().block() is a blocking call inside a reactive operator.
                    // Consider zipping getKeyId() with wrapKey() instead — confirm whether this key
                    // type resolves its id immediately.
                    WrappedKey wrappedKey = new WrappedKey(
                        this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm);
                    EncryptionData encryptionData = new EncryptionData()
                        .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE)
                        .setEncryptionAgent(
                            new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1,
                                EncryptionAlgorithm.AES_CBC_256))
                        .setKeyWrappingMetadata(keyWrappingMetadata)
                        .setContentEncryptionIV(cipher.getIV())
                        .setWrappedContentKey(wrappedKey);
                    Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> {
                        int outputSize = cipher.getOutputSize(plainTextBuffer.remaining());
                        /*
                        This should be the only place we allocate memory in encryptBlob(). Although an
                        in-place overload exists, we must not overwrite the customer's memory, so we
                        allocate our own buffer. Implement pooling if usage becomes unreasonable.
                        */
                        ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize);
                        int encryptedBytes;
                        try {
                            encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer);
                        } catch (ShortBufferException e) {
                            throw logger.logExceptionAsError(Exceptions.propagate(e));
                        }
                        // Flip the buffer to expose only the bytes the cipher produced.
                        encryptedTextBuffer.position(0);
                        encryptedTextBuffer.limit(encryptedBytes);
                        return encryptedTextBuffer;
                    });
                    /*
                    Defer() ensures cipher.doFinal() is not called until the plainTextFlux has
                    completed and therefore all other data has been encrypted.
                    */
                    encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> {
                        try {
                            return Flux.just(ByteBuffer.wrap(cipher.doFinal()));
                        } catch (GeneralSecurityException e) {
                            throw logger.logExceptionAsError(Exceptions.propagate(e));
                        }
                    }));
                    return new EncryptedBlob(encryptionData, encryptedTextFlux);
                });
        } catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
            throw logger.logExceptionAsError(new RuntimeException(e));
        }
    }

    /**
     * Encrypts the blob and records the serialized encryption metadata into the caller's metadata map.
     *
     * @param plainText The data to encrypt
     * @param metadata The customer's metadata to be updated; the encryption data is stored under
     * {@code CryptographyConstants.ENCRYPTION_DATA_KEY}.
     * @return A Mono containing the cipher text
     */
    private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText,
        Map<String, String> metadata) {
        try {
            return this.encryptBlob(plainText)
                .flatMap(encryptedBlob -> {
                    try {
                        metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY,
                            encryptedBlob.getEncryptionData().toJsonString());
                        return Mono.just(encryptedBlob.getCiphertextFlux());
                    } catch (JsonProcessingException e) {
                        throw logger.logExceptionAsError(Exceptions.propagate(e));
                    }
                });
        } catch (InvalidKeyException e) {
            throw logger.logExceptionAsError(Exceptions.propagate(e));
        }
    }

    /**
     * Unsupported. Cannot query data encrypted on client side.
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public Flux<ByteBuffer> query(String expression) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Cannot query data encrypted on client side"));
    }

    /**
     * Unsupported. Cannot query data encrypted on client side.
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Cannot query data encrypted on client side"));
    }
}
Yes. I thought I went through and changed all these, but I guess I missed some. Nice catch!
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { Objects.requireNonNull(options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); options.setMetadata(metadataFinal); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse(new BlobParallelUploadOptions(df) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions()).setTimeout(options.getTimeout()))); } catch (RuntimeException ex) { return monoError(logger, ex); } }
Objects.requireNonNull(options);
new BlobParallelUploadOptions(df) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions()))); } catch (RuntimeException ex) { return monoError(logger, ex); }
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(AccessTier.HOT).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { Objects.requireNonNull(options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); options.setMetadata(metadataFinal); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @param overwrite Whether or not to overwrite should data exist on the blob.
* @return An empty response
*/
@Override
public Mono<Void> uploadFromFile(String filePath, boolean overwrite) {
    try {
        // Assemble the upload pipeline eagerly; nothing executes until subscription.
        Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null);
        if (overwrite) {
            return uploadTask;
        } else {
            // Guard against clobbering: only upload when the blob does not already exist,
            // otherwise surface BLOB_ALREADY_EXISTS through the returned Mono.
            return exists()
                .flatMap(exists -> exists
                    ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS))
                    : uploadTask);
        }
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono, matching the reactive contract.
        return monoError(logger, ex);
    }
}

/**
 * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
 * file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
 *
 * @param filePath Path to the upload file
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file.
 * @param headers {@link BlobHttpHeaders}
 * @param metadata Metadata to associate with the blob.
 * @param tier {@link AccessTier} for the destination blob.
* @param requestConditions {@link BlobRequestConditions}
* @return An empty response
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB
* @throws UncheckedIOException If an I/O error occurs
*/
@Override
public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
    BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier,
    BlobRequestConditions requestConditions) {
    // Thin delegation: bundle the discrete arguments into BlobUploadFromFileOptions and
    // discard the response body, returning only completion/error via then().
    return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath)
        .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata)
        .setTier(tier).setRequestConditions(requestConditions))
        .then();
}

/**
 * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
 * file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse
 *
 * @param options {@link BlobUploadFromFileOptions}
 * @return A reactive response containing the information of the uploaded block blob.
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { try { StorageImplUtils.assertNotNull("options", options); return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel)) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())) .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }), channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Encrypts the given Flux ByteBuffer. * * @param plainTextFlux The Flux ByteBuffer to be encrypted. 
* * @return A {@link EncryptedBlob} * * @throws InvalidKeyException If the key provided is invalid */ Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException { Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null"); try { KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES); keyGen.init(256); Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING); SecretKey aesKey = keyGen.generateKey(); cipher.init(Cipher.ENCRYPT_MODE, aesKey); Map<String, String> keyWrappingMetadata = new HashMap<>(); keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY, CryptographyConstants.AGENT_METADATA_VALUE); return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded()) .map(encryptedKey -> { WrappedKey wrappedKey = new WrappedKey( this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm); EncryptionData encryptionData = new EncryptionData() .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE) .setEncryptionAgent( new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256)) .setKeyWrappingMetadata(keyWrappingMetadata) .setContentEncryptionIV(cipher.getIV()) .setWrappedContentKey(wrappedKey); Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> { int outputSize = cipher.getOutputSize(plainTextBuffer.remaining()); /* This should be the only place we allocate memory in encryptBlob(). Although there is an overload that can encrypt in place that would save allocations, we do not want to overwrite customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable, we should implement pooling. 
*/ ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize); int encryptedBytes; try { encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer); } catch (ShortBufferException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } encryptedTextBuffer.position(0); encryptedTextBuffer.limit(encryptedBytes); return encryptedTextBuffer; }); /* Defer() ensures the contained code is not executed until the Flux is subscribed to, in other words, cipher.doFinal() will not be called until the plainTextFlux has completed and therefore all other data has been encrypted. */ encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> { try { return Flux.just(ByteBuffer.wrap(cipher.doFinal())); } catch (GeneralSecurityException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } })); return new EncryptedBlob(encryptionData, encryptedTextFlux); }); } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Encrypt the blob and add the encryption metadata to the customer's metadata. * * @param plainText The data to encrypt * @param metadata The customer's metadata to be updated. * * @return A Mono containing the cipher text */ private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) { try { return this.encryptBlob(plainText) .flatMap(encryptedBlob -> { try { metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptedBlob.getEncryptionData().toJsonString()); return Mono.just(encryptedBlob.getCiphertextFlux()); } catch (JsonProcessingException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }); } catch (InvalidKeyException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } /** * Unsupported. Cannot query data encrypted on client side. 
*/
@Override
public Flux<ByteBuffer> query(String expression) {
    // Blob query executes server-side, but the service only ever sees ciphertext for
    // client-side encrypted blobs, so the operation cannot produce meaningful results.
    throw logger.logExceptionAsError(new UnsupportedOperationException(
        "Cannot query data encrypted on client side"));
}

/**
 * Unsupported. Cannot query data encrypted on client side.
 *
 * @param queryOptions {@link BlobQueryOptions} (ignored; the operation always fails).
 * @throws UnsupportedOperationException always, thrown synchronously rather than through the returned Mono.
 */
@Override
public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) {
    throw logger.logExceptionAsError(new UnsupportedOperationException(
        "Cannot query data encrypted on client side"));
}
}
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(AccessTier.HOT).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { try { StorageImplUtils.assertNotNull("options", options); return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel)) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())) .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }), channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Encrypts the given Flux ByteBuffer. * * @param plainTextFlux The Flux ByteBuffer to be encrypted. 
* * @return A {@link EncryptedBlob} * * @throws InvalidKeyException If the key provided is invalid */ Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException { Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null"); try { KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES); keyGen.init(256); Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING); SecretKey aesKey = keyGen.generateKey(); cipher.init(Cipher.ENCRYPT_MODE, aesKey); Map<String, String> keyWrappingMetadata = new HashMap<>(); keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY, CryptographyConstants.AGENT_METADATA_VALUE); return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded()) .map(encryptedKey -> { WrappedKey wrappedKey = new WrappedKey( this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm); EncryptionData encryptionData = new EncryptionData() .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE) .setEncryptionAgent( new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256)) .setKeyWrappingMetadata(keyWrappingMetadata) .setContentEncryptionIV(cipher.getIV()) .setWrappedContentKey(wrappedKey); Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> { int outputSize = cipher.getOutputSize(plainTextBuffer.remaining()); /* This should be the only place we allocate memory in encryptBlob(). Although there is an overload that can encrypt in place that would save allocations, we do not want to overwrite customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable, we should implement pooling. 
*/ ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize); int encryptedBytes; try { encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer); } catch (ShortBufferException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } encryptedTextBuffer.position(0); encryptedTextBuffer.limit(encryptedBytes); return encryptedTextBuffer; }); /* Defer() ensures the contained code is not executed until the Flux is subscribed to, in other words, cipher.doFinal() will not be called until the plainTextFlux has completed and therefore all other data has been encrypted. */ encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> { try { return Flux.just(ByteBuffer.wrap(cipher.doFinal())); } catch (GeneralSecurityException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } })); return new EncryptedBlob(encryptionData, encryptedTextFlux); }); } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Encrypt the blob and add the encryption metadata to the customer's metadata. * * @param plainText The data to encrypt * @param metadata The customer's metadata to be updated. * * @return A Mono containing the cipher text */ private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) { try { return this.encryptBlob(plainText) .flatMap(encryptedBlob -> { try { metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptedBlob.getEncryptionData().toJsonString()); return Mono.just(encryptedBlob.getCiphertextFlux()); } catch (JsonProcessingException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }); } catch (InvalidKeyException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } /** * Unsupported. Cannot query data encrypted on client side. 
*/ @Override public Flux<ByteBuffer> query(String expression) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } /** * Unsupported. Cannot query data encrypted on client side. */ @Override public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } }
sounds good,
/**
 * Creates a new block blob, or overwrites an existing one, encrypting the data client side before it is
 * sent to the service. The content-encryption metadata is merged into the caller's metadata so the blob can
 * later be decrypted.
 *
 * @param options {@link BlobParallelUploadOptions} carrying the data (as a Flux or a stream), transfer
 * options, headers, metadata, tags, tier, request conditions and timeout. Must not be null.
 * @return A reactive response containing the information of the uploaded block blob.
 */
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) {
    try {
        Objects.requireNonNull(options);
        // Ensure a mutable, non-null metadata map: prepareToSendEncryptedRequest() inserts the
        // encryption-data entry into this exact map instance.
        final Map<String, String> metadataFinal = options.getMetadata() == null
            ? new HashMap<>() : options.getMetadata();
        // Write the (possibly fresh) map back so the forwarded options and the encrypted metadata agree.
        options.setMetadata(metadataFinal);
        // Prefer the Flux form of the data; otherwise chunk the InputStream into ByteBuffers.
        Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer(
            options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux();
        // Encrypt the plaintext and stamp the encryption metadata into metadataFinal (side effect).
        Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal);
        // Upload the ciphertext with all caller-supplied settings preserved.
        return dataFinal.flatMap(df -> super.uploadWithResponse(new BlobParallelUploadOptions(df)
            .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
            .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier())
            .setRequestConditions(options.getRequestConditions()).setTimeout(options.getTimeout())));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel instead of throwing.
        return monoError(logger, ex);
    }
}
options.setMetadata(metadataFinal);
new BlobParallelUploadOptions(df) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions()))); } catch (RuntimeException ex) { return monoError(logger, ex); }
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(AccessTier.HOT).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { Objects.requireNonNull(options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); options.setMetadata(metadataFinal); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { try { StorageImplUtils.assertNotNull("options", options); return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel)) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())) .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }), channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Encrypts the given Flux ByteBuffer. * * @param plainTextFlux The Flux ByteBuffer to be encrypted. 
* * @return A {@link EncryptedBlob} * * @throws InvalidKeyException If the key provided is invalid */ Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException { Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null"); try { KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES); keyGen.init(256); Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING); SecretKey aesKey = keyGen.generateKey(); cipher.init(Cipher.ENCRYPT_MODE, aesKey); Map<String, String> keyWrappingMetadata = new HashMap<>(); keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY, CryptographyConstants.AGENT_METADATA_VALUE); return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded()) .map(encryptedKey -> { WrappedKey wrappedKey = new WrappedKey( this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm); EncryptionData encryptionData = new EncryptionData() .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE) .setEncryptionAgent( new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256)) .setKeyWrappingMetadata(keyWrappingMetadata) .setContentEncryptionIV(cipher.getIV()) .setWrappedContentKey(wrappedKey); Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> { int outputSize = cipher.getOutputSize(plainTextBuffer.remaining()); /* This should be the only place we allocate memory in encryptBlob(). Although there is an overload that can encrypt in place that would save allocations, we do not want to overwrite customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable, we should implement pooling. 
*/ ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize); int encryptedBytes; try { encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer); } catch (ShortBufferException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } encryptedTextBuffer.position(0); encryptedTextBuffer.limit(encryptedBytes); return encryptedTextBuffer; }); /* Defer() ensures the contained code is not executed until the Flux is subscribed to, in other words, cipher.doFinal() will not be called until the plainTextFlux has completed and therefore all other data has been encrypted. */ encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> { try { return Flux.just(ByteBuffer.wrap(cipher.doFinal())); } catch (GeneralSecurityException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } })); return new EncryptedBlob(encryptionData, encryptedTextFlux); }); } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Encrypt the blob and add the encryption metadata to the customer's metadata. * * @param plainText The data to encrypt * @param metadata The customer's metadata to be updated. * * @return A Mono containing the cipher text */ private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) { try { return this.encryptBlob(plainText) .flatMap(encryptedBlob -> { try { metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptedBlob.getEncryptionData().toJsonString()); return Mono.just(encryptedBlob.getCiphertextFlux()); } catch (JsonProcessingException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }); } catch (InvalidKeyException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } /** * Unsupported. Cannot query data encrypted on client side. 
*/ @Override public Flux<ByteBuffer> query(String expression) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } /** * Unsupported. Cannot query data encrypted on client side. */ @Override public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } }
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(AccessTier.HOT).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { try { StorageImplUtils.assertNotNull("options", options); return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel)) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())) .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }), channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Encrypts the given Flux ByteBuffer. * * @param plainTextFlux The Flux ByteBuffer to be encrypted. 
* * @return A {@link EncryptedBlob} * * @throws InvalidKeyException If the key provided is invalid */ Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException { Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null"); try { KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES); keyGen.init(256); Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING); SecretKey aesKey = keyGen.generateKey(); cipher.init(Cipher.ENCRYPT_MODE, aesKey); Map<String, String> keyWrappingMetadata = new HashMap<>(); keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY, CryptographyConstants.AGENT_METADATA_VALUE); return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded()) .map(encryptedKey -> { WrappedKey wrappedKey = new WrappedKey( this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm); EncryptionData encryptionData = new EncryptionData() .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE) .setEncryptionAgent( new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256)) .setKeyWrappingMetadata(keyWrappingMetadata) .setContentEncryptionIV(cipher.getIV()) .setWrappedContentKey(wrappedKey); Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> { int outputSize = cipher.getOutputSize(plainTextBuffer.remaining()); /* This should be the only place we allocate memory in encryptBlob(). Although there is an overload that can encrypt in place that would save allocations, we do not want to overwrite customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable, we should implement pooling. 
*/ ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize); int encryptedBytes; try { encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer); } catch (ShortBufferException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } encryptedTextBuffer.position(0); encryptedTextBuffer.limit(encryptedBytes); return encryptedTextBuffer; }); /* Defer() ensures the contained code is not executed until the Flux is subscribed to, in other words, cipher.doFinal() will not be called until the plainTextFlux has completed and therefore all other data has been encrypted. */ encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> { try { return Flux.just(ByteBuffer.wrap(cipher.doFinal())); } catch (GeneralSecurityException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } })); return new EncryptedBlob(encryptionData, encryptedTextFlux); }); } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Encrypt the blob and add the encryption metadata to the customer's metadata. * * @param plainText The data to encrypt * @param metadata The customer's metadata to be updated. * * @return A Mono containing the cipher text */ private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) { try { return this.encryptBlob(plainText) .flatMap(encryptedBlob -> { try { metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptedBlob.getEncryptionData().toJsonString()); return Mono.just(encryptedBlob.getCiphertextFlux()); } catch (JsonProcessingException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }); } catch (InvalidKeyException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } /** * Unsupported. Cannot query data encrypted on client side. 
*/ @Override public Flux<ByteBuffer> query(String expression) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } /** * Unsupported. Cannot query data encrypted on client side. */ @Override public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } }
btw. Collections.emtpyMap() might be more optimal.
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { Objects.requireNonNull(options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); options.setMetadata(metadataFinal); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse(new BlobParallelUploadOptions(df) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions()).setTimeout(options.getTimeout()))); } catch (RuntimeException ex) { return monoError(logger, ex); } }
options.setMetadata(metadataFinal);
new BlobParallelUploadOptions(df) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions()))); } catch (RuntimeException ex) { return monoError(logger, ex); }
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(AccessTier.HOT).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { Objects.requireNonNull(options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); options.setMetadata(metadataFinal); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { try { StorageImplUtils.assertNotNull("options", options); return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel)) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())) .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }), channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Encrypts the given Flux ByteBuffer. * * @param plainTextFlux The Flux ByteBuffer to be encrypted. 
* * @return A {@link EncryptedBlob} * * @throws InvalidKeyException If the key provided is invalid */ Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException { Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null"); try { KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES); keyGen.init(256); Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING); SecretKey aesKey = keyGen.generateKey(); cipher.init(Cipher.ENCRYPT_MODE, aesKey); Map<String, String> keyWrappingMetadata = new HashMap<>(); keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY, CryptographyConstants.AGENT_METADATA_VALUE); return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded()) .map(encryptedKey -> { WrappedKey wrappedKey = new WrappedKey( this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm); EncryptionData encryptionData = new EncryptionData() .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE) .setEncryptionAgent( new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256)) .setKeyWrappingMetadata(keyWrappingMetadata) .setContentEncryptionIV(cipher.getIV()) .setWrappedContentKey(wrappedKey); Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> { int outputSize = cipher.getOutputSize(plainTextBuffer.remaining()); /* This should be the only place we allocate memory in encryptBlob(). Although there is an overload that can encrypt in place that would save allocations, we do not want to overwrite customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable, we should implement pooling. 
*/ ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize); int encryptedBytes; try { encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer); } catch (ShortBufferException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } encryptedTextBuffer.position(0); encryptedTextBuffer.limit(encryptedBytes); return encryptedTextBuffer; }); /* Defer() ensures the contained code is not executed until the Flux is subscribed to, in other words, cipher.doFinal() will not be called until the plainTextFlux has completed and therefore all other data has been encrypted. */ encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> { try { return Flux.just(ByteBuffer.wrap(cipher.doFinal())); } catch (GeneralSecurityException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } })); return new EncryptedBlob(encryptionData, encryptedTextFlux); }); } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Encrypt the blob and add the encryption metadata to the customer's metadata. * * @param plainText The data to encrypt * @param metadata The customer's metadata to be updated. * * @return A Mono containing the cipher text */ private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) { try { return this.encryptBlob(plainText) .flatMap(encryptedBlob -> { try { metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptedBlob.getEncryptionData().toJsonString()); return Mono.just(encryptedBlob.getCiphertextFlux()); } catch (JsonProcessingException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }); } catch (InvalidKeyException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } /** * Unsupported. Cannot query data encrypted on client side. 
*/ @Override public Flux<ByteBuffer> query(String expression) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } /** * Unsupported. Cannot query data encrypted on client side. */ @Override public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } }
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(AccessTier.HOT).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { try { StorageImplUtils.assertNotNull("options", options); return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel)) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())) .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }), channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Encrypts the given Flux ByteBuffer. * * @param plainTextFlux The Flux ByteBuffer to be encrypted. 
* * @return A {@link EncryptedBlob} * * @throws InvalidKeyException If the key provided is invalid */ Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException { Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null"); try { KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES); keyGen.init(256); Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING); SecretKey aesKey = keyGen.generateKey(); cipher.init(Cipher.ENCRYPT_MODE, aesKey); Map<String, String> keyWrappingMetadata = new HashMap<>(); keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY, CryptographyConstants.AGENT_METADATA_VALUE); return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded()) .map(encryptedKey -> { WrappedKey wrappedKey = new WrappedKey( this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm); EncryptionData encryptionData = new EncryptionData() .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE) .setEncryptionAgent( new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256)) .setKeyWrappingMetadata(keyWrappingMetadata) .setContentEncryptionIV(cipher.getIV()) .setWrappedContentKey(wrappedKey); Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> { int outputSize = cipher.getOutputSize(plainTextBuffer.remaining()); /* This should be the only place we allocate memory in encryptBlob(). Although there is an overload that can encrypt in place that would save allocations, we do not want to overwrite customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable, we should implement pooling. 
*/ ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize); int encryptedBytes; try { encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer); } catch (ShortBufferException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } encryptedTextBuffer.position(0); encryptedTextBuffer.limit(encryptedBytes); return encryptedTextBuffer; }); /* Defer() ensures the contained code is not executed until the Flux is subscribed to, in other words, cipher.doFinal() will not be called until the plainTextFlux has completed and therefore all other data has been encrypted. */ encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> { try { return Flux.just(ByteBuffer.wrap(cipher.doFinal())); } catch (GeneralSecurityException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } })); return new EncryptedBlob(encryptionData, encryptedTextFlux); }); } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Encrypt the blob and add the encryption metadata to the customer's metadata. * * @param plainText The data to encrypt * @param metadata The customer's metadata to be updated. * * @return A Mono containing the cipher text */ private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) { try { return this.encryptBlob(plainText) .flatMap(encryptedBlob -> { try { metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptedBlob.getEncryptionData().toJsonString()); return Mono.just(encryptedBlob.getCiphertextFlux()); } catch (JsonProcessingException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }); } catch (InvalidKeyException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } /** * Unsupported. Cannot query data encrypted on client side. 
*/ @Override public Flux<ByteBuffer> query(String expression) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } /** * Unsupported. Cannot query data encrypted on client side. */ @Override public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } }
The amount of duplicated logic we're amassing is concerning. This is going to be painful from a maintenance standpoint.
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getDeadLetterEntityPath(logger, entityType, queueName, topicName, subscriptionName); if (prefetchCount < 1) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "prefetchCount (%s) cannot be less than 1.", prefetchCount))); } else if (maxAutoLockRenewalDuration != null && maxAutoLockRenewalDuration.isNegative()) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "maxAutoLockRenewalDuration (%s) cannot be negative.", maxAutoLockRenewalDuration))); } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewalDuration); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); }
if (prefetchCount < 1) {
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName); validateAndThrow(prefetchCount, maxAutoLockRenewalDuration); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewalDuration, sessionId, isRollingSessionReceiver(), maxConcurrentSessions); if (CoreUtils.isNullOrEmpty(sessionId)) { final UnnamedSessionManager sessionManager = new UnnamedSessionManager(entityPath, entityType, connectionProcessor, connectionProcessor.getRetryOptions().getTryTimeout(), tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); } else { return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewalDuration; private ServiceBusSessionReceiverClientBuilder() { } /** * Enables auto-lock renewal by renewing each session lock until the {@code maxAutoLockRenewalDuration} has * elapsed. * * @param maxAutoLockRenewalDuration Maximum amount of time to renew the session lock. * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewalDuration(Duration maxAutoLockRenewalDuration) { this.maxAutoLockRenewalDuration = maxAutoLockRenewalDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. 
* * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. 
* * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. */ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewalDuration; private ServiceBusSessionReceiverClientBuilder() { } /** * Enables auto-lock renewal by renewing each session lock until the {@code maxAutoLockRenewalDuration} has * elapsed. * * @param maxAutoLockRenewalDuration Maximum amount of time to renew the session lock. * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewalDuration(Duration maxAutoLockRenewalDuration) { this.maxAutoLockRenewalDuration = maxAutoLockRenewalDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. 
* * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. 
* * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. */ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
Why returning an unmodifiable list here? I thought the intention of this API is return a copy of the current list for the user to modify before creating a CTC out of it.
public List<TokenCredential> getCredentials() { return Collections.unmodifiableList(tokenCredentials); }
return Collections.unmodifiableList(tokenCredentials);
public List<TokenCredential> getCredentials() { return super.getCredentials(); }
class DefaultAzureCredential extends ChainedTokenCredential { private final List<TokenCredential> tokenCredentials; /** * Creates default DefaultAzureCredential instance to use. This will use AZURE_CLIENT_ID, * AZURE_CLIENT_SECRET, and AZURE_TENANT_ID environment variables to create a * ClientSecretCredential. * * If these environment variables are not available, then this will use the Shared MSAL * token cache. * * @param tokenCredentials the list of credentials to execute for authentication. */ DefaultAzureCredential(ArrayDeque<TokenCredential> tokenCredentials) { super(tokenCredentials); this.tokenCredentials = new ArrayList<TokenCredential>(tokenCredentials.size()); this.tokenCredentials.addAll(tokenCredentials); } /** * Get the list of credentials sequentially used by {@link DefaultAzureCredential} to attempt authentication. * The credentials in the returned list and their order may change in future versions of Identity. * This API is not intended to be used in production ready code and should only be used for development purposes. * * @return The list of {@link TokenCredential}. */ }
class DefaultAzureCredential extends ChainedTokenCredential { /** * Creates default DefaultAzureCredential instance to use. This will use AZURE_CLIENT_ID, * AZURE_CLIENT_SECRET, and AZURE_TENANT_ID environment variables to create a * ClientSecretCredential. * * If these environment variables are not available, then this will use the Shared MSAL * token cache. * * @param tokenCredentials the list of credentials to execute for authentication. */ DefaultAzureCredential(List<TokenCredential> tokenCredentials) { super(tokenCredentials); } /** * {@inheritDoc} * The credentials in the returned list and their order may change in future versions of Identity. * This API is not intended to be used in production ready code and should only be used for development purposes. * * @return The list of {@link TokenCredential}. */ }
I think @schaabs said, we either return immutable list or a copy of it. I think having it immutable prevents the use of indexes to replace/re-order items in place. and will require users to create their custom list whenever they want to modify it. We can return a copy too.
public List<TokenCredential> getCredentials() { return Collections.unmodifiableList(tokenCredentials); }
return Collections.unmodifiableList(tokenCredentials);
public List<TokenCredential> getCredentials() { return super.getCredentials(); }
class DefaultAzureCredential extends ChainedTokenCredential { private final List<TokenCredential> tokenCredentials; /** * Creates default DefaultAzureCredential instance to use. This will use AZURE_CLIENT_ID, * AZURE_CLIENT_SECRET, and AZURE_TENANT_ID environment variables to create a * ClientSecretCredential. * * If these environment variables are not available, then this will use the Shared MSAL * token cache. * * @param tokenCredentials the list of credentials to execute for authentication. */ DefaultAzureCredential(ArrayDeque<TokenCredential> tokenCredentials) { super(tokenCredentials); this.tokenCredentials = new ArrayList<TokenCredential>(tokenCredentials.size()); this.tokenCredentials.addAll(tokenCredentials); } /** * Get the list of credentials sequentially used by {@link DefaultAzureCredential} to attempt authentication. * The credentials in the returned list and their order may change in future versions of Identity. * This API is not intended to be used in production ready code and should only be used for development purposes. * * @return The list of {@link TokenCredential}. */ }
class DefaultAzureCredential extends ChainedTokenCredential { /** * Creates default DefaultAzureCredential instance to use. This will use AZURE_CLIENT_ID, * AZURE_CLIENT_SECRET, and AZURE_TENANT_ID environment variables to create a * ClientSecretCredential. * * If these environment variables are not available, then this will use the Shared MSAL * token cache. * * @param tokenCredentials the list of credentials to execute for authentication. */ DefaultAzureCredential(List<TokenCredential> tokenCredentials) { super(tokenCredentials); } /** * {@inheritDoc} * The credentials in the returned list and their order may change in future versions of Identity. * This API is not intended to be used in production ready code and should only be used for development purposes. * * @return The list of {@link TokenCredential}. */ }
Discussed this offline, and since we're using `Colletions.unmodifiableList` to create this instance we can return it without worrying about users modifying it.
public List<TokenCredential> getCredentials() { return credentials; }
return credentials;
public List<TokenCredential> getCredentials() { return credentials; }
class ChainedTokenCredential implements TokenCredential { private final List<TokenCredential> credentials; private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> "; /** * Create an instance of chained token credential that aggregates a list of token * credentials. */ ChainedTokenCredential(List<TokenCredential> credentials) { this.credentials = Collections.unmodifiableList(credentials); } @Override public Mono<AccessToken> getToken(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); return Flux.fromIterable(credentials) .flatMap(p -> p.getToken(request).onErrorResume(Exception.class, t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( unavailableError + p.getClass().getSimpleName() + " authentication failed. Error Details: " + t.getMessage(), null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage(), last.getCause()); } return Mono.error(last); })); } /** * Get the read-only list of credentials sequentially used to attempt authentication. * * @return The list of {@link TokenCredential}. */ }
class ChainedTokenCredential implements TokenCredential { private final List<TokenCredential> credentials; private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> "; /** * Create an instance of chained token credential that aggregates a list of token * credentials. */ ChainedTokenCredential(List<TokenCredential> credentials) { this.credentials = Collections.unmodifiableList(credentials); } @Override public Mono<AccessToken> getToken(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); return Flux.fromIterable(credentials) .flatMap(p -> p.getToken(request).onErrorResume(Exception.class, t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( unavailableError + p.getClass().getSimpleName() + " authentication failed. Error Details: " + t.getMessage(), null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage(), last.getCause()); } return Mono.error(last); })); } /** * Get the read-only list of credentials sequentially used to attempt authentication. * * @return The list of {@link TokenCredential}. */ }
This should return an unmodifiable list. Create the unmodifiable list in the ctor itself.
public List<TokenCredential> getCredentials() { return tokenCredentials; }
return tokenCredentials;
public List<TokenCredential> getCredentials() { return super.getCredentials(); }
class DefaultAzureCredential extends ChainedTokenCredential { private final List<TokenCredential> tokenCredentials; /** * Creates default DefaultAzureCredential instance to use. This will use AZURE_CLIENT_ID, * AZURE_CLIENT_SECRET, and AZURE_TENANT_ID environment variables to create a * ClientSecretCredential. * * If these environment variables are not available, then this will use the Shared MSAL * token cache. * * @param tokenCredentials the list of credentials to execute for authentication. */ DefaultAzureCredential(ArrayDeque<TokenCredential> tokenCredentials) { super(tokenCredentials); this.tokenCredentials = new ArrayList<TokenCredential>(tokenCredentials.size()); this.tokenCredentials.addAll(tokenCredentials); } /** * Get the list of credentials sequentially used by {@link DefaultAzureCredential} to attempt authentication. * Any changes made to the returned list will not reflect in the list of credentials * used by {@link DefaultAzureCredential} to authenticate. * The credentials in the returned list and their order may change in future versions of Identity. * This API is not intended to be used in production ready code and should only be used for development purposes. * * @return The list of {@link TokenCredential}. */ }
class DefaultAzureCredential extends ChainedTokenCredential { /** * Creates default DefaultAzureCredential instance to use. This will use AZURE_CLIENT_ID, * AZURE_CLIENT_SECRET, and AZURE_TENANT_ID environment variables to create a * ClientSecretCredential. * * If these environment variables are not available, then this will use the Shared MSAL * token cache. * * @param tokenCredentials the list of credentials to execute for authentication. */ DefaultAzureCredential(List<TokenCredential> tokenCredentials) { super(tokenCredentials); } /** * {@inheritDoc} * The credentials in the returned list and their order may change in future versions of Identity. * This API is not intended to be used in production ready code and should only be used for development purposes. * * @return The list of {@link TokenCredential}. */ }
made it unmodifiable. If users want to modify it, they can create a modifiable list from it. @jianghaolu
public List<TokenCredential> getCredentials() { return tokenCredentials; }
return tokenCredentials;
public List<TokenCredential> getCredentials() { return super.getCredentials(); }
class DefaultAzureCredential extends ChainedTokenCredential { private final List<TokenCredential> tokenCredentials; /** * Creates default DefaultAzureCredential instance to use. This will use AZURE_CLIENT_ID, * AZURE_CLIENT_SECRET, and AZURE_TENANT_ID environment variables to create a * ClientSecretCredential. * * If these environment variables are not available, then this will use the Shared MSAL * token cache. * * @param tokenCredentials the list of credentials to execute for authentication. */ DefaultAzureCredential(ArrayDeque<TokenCredential> tokenCredentials) { super(tokenCredentials); this.tokenCredentials = new ArrayList<TokenCredential>(tokenCredentials.size()); this.tokenCredentials.addAll(tokenCredentials); } /** * Get the list of credentials sequentially used by {@link DefaultAzureCredential} to attempt authentication. * Any changes made to the returned list will not reflect in the list of credentials * used by {@link DefaultAzureCredential} to authenticate. * The credentials in the returned list and their order may change in future versions of Identity. * This API is not intended to be used in production ready code and should only be used for development purposes. * * @return The list of {@link TokenCredential}. */ }
class DefaultAzureCredential extends ChainedTokenCredential { /** * Creates default DefaultAzureCredential instance to use. This will use AZURE_CLIENT_ID, * AZURE_CLIENT_SECRET, and AZURE_TENANT_ID environment variables to create a * ClientSecretCredential. * * If these environment variables are not available, then this will use the Shared MSAL * token cache. * * @param tokenCredentials the list of credentials to execute for authentication. */ DefaultAzureCredential(List<TokenCredential> tokenCredentials) { super(tokenCredentials); } /** * {@inheritDoc} * The credentials in the returned list and their order may change in future versions of Identity. * This API is not intended to be used in production ready code and should only be used for development purposes. * * @return The list of {@link TokenCredential}. */ }
The description here says that the list is read-only is this the case? It's returning the underlying instance of `credentials` as a `List<TokenCredential>`? We want to make sure we don't allow people to mutate the state of the already created credential.
public List<TokenCredential> getCredentials() { return credentials; }
return credentials;
public List<TokenCredential> getCredentials() { return credentials; }
class ChainedTokenCredential implements TokenCredential { private final List<TokenCredential> credentials; private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> "; /** * Create an instance of chained token credential that aggregates a list of token * credentials. */ ChainedTokenCredential(List<TokenCredential> credentials) { this.credentials = Collections.unmodifiableList(credentials); } @Override public Mono<AccessToken> getToken(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); return Flux.fromIterable(credentials) .flatMap(p -> p.getToken(request).onErrorResume(Exception.class, t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( unavailableError + p.getClass().getSimpleName() + " authentication failed. Error Details: " + t.getMessage(), null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage(), last.getCause()); } return Mono.error(last); })); } /** * Get the read-only list of credentials sequentially used to attempt authentication. * * @return The list of {@link TokenCredential}. */ }
class ChainedTokenCredential implements TokenCredential { private final List<TokenCredential> credentials; private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> "; /** * Create an instance of chained token credential that aggregates a list of token * credentials. */ ChainedTokenCredential(List<TokenCredential> credentials) { this.credentials = Collections.unmodifiableList(credentials); } @Override public Mono<AccessToken> getToken(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); return Flux.fromIterable(credentials) .flatMap(p -> p.getToken(request).onErrorResume(Exception.class, t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( unavailableError + p.getClass().getSimpleName() + " authentication failed. Error Details: " + t.getMessage(), null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage(), last.getCause()); } return Mono.error(last); })); } /** * Get the read-only list of credentials sequentially used to attempt authentication. * * @return The list of {@link TokenCredential}. */ }
Since this is now `unmodifiableList` , we may want to document this in java doc for its getter @ https://github.com/Azure/azure-sdk-for-java/blob/44f6e0b52fe3203626036adcd48170b715cc0640/sdk/formrecognizer/azure-ai-formrecognizer/src/main/java/com/azure/ai/formrecognizer/models/BoundingBox.java#L41
public BoundingBox(final List<Point> points) { if (points == null) { this.points = null; } else { this.points = Collections.unmodifiableList(new ArrayList<>(points)); } }
}
public BoundingBox(final List<Point> points) { if (points == null) { this.points = null; } else { this.points = Collections.unmodifiableList(points); } }
class BoundingBox { /** * The list of coordinates of the Bounding box. */ private final List<Point> points; /** * Constructs a Bounding box object. * * @param points The list of coordinates of the Bounding box. */ /** * Gets the list of all point coordinates of the bounding box. * * @return The list of all point coordinates of the Bounding box. */ public List<Point> getPoints() { return this.points; } }
class BoundingBox { /** * The list of coordinates of the Bounding box. */ private final List<Point> points; /** * Constructs a Bounding box object. * * @param points The list of coordinates of the Bounding box. */ /** * Gets the list of all point coordinates of the bounding box. * * @return The unmodifiable list of all point coordinates of the Bounding box. */ public List<Point> getPoints() { return this.points; } }
Would it make sense to use `!CoreUtils.isNullOrEmpty(errorInformationList)` , that will also check for null ?
public FormRecognizerException(final String message, final List<ErrorInformation> errorInformationList) { super(message); StringBuilder errorInformationStringBuilder = new StringBuilder().append(message); if (!errorInformationList.isEmpty()) { for (ErrorInformation errorInformation : errorInformationList) { errorInformationStringBuilder.append(", " + "errorCode" + ": [") .append(errorInformation.getCode()).append("], ").append("message") .append(": ").append(errorInformation.getMessage()); } } this.errorInformationMessage = errorInformationStringBuilder.toString(); this.errorInformationList = Collections.unmodifiableList(new ArrayList<>(errorInformationList)); }
if (!errorInformationList.isEmpty()) {
public FormRecognizerException(final String message, final List<ErrorInformation> errorInformationList) { super(message); StringBuilder errorInformationStringBuilder = new StringBuilder().append(message); if (!CoreUtils.isNullOrEmpty(errorInformationList)) { for (ErrorInformation errorInformation : errorInformationList) { errorInformationStringBuilder.append(", " + "errorCode" + ": [") .append(errorInformation.getCode()).append("], ").append("message") .append(": ").append(errorInformation.getMessage()); } this.errorInformationList = Collections.unmodifiableList(errorInformationList); } else { this.errorInformationList = null; } this.errorInformationMessage = errorInformationStringBuilder.toString(); }
class FormRecognizerException extends AzureException { private final List<ErrorInformation> errorInformationList; private final String errorInformationMessage; /** * Initializes a new instance of {@link FormRecognizerException} class * * @param message Text containing the details of the exception. * @param errorInformationList The List of error information that caused the exception */ @Override public String getMessage() { return this.errorInformationMessage; } /** * Get the error information list for this exception. * * @return the error information list for this exception. */ public List<ErrorInformation> getErrorInformation() { return this.errorInformationList; } }
class FormRecognizerException extends AzureException { private final List<ErrorInformation> errorInformationList; private final String errorInformationMessage; /** * Initializes a new instance of {@link FormRecognizerException} class * * @param message Text containing the details of the exception. * @param errorInformationList The List of error information that caused the exception */ @Override public String getMessage() { return this.errorInformationMessage; } /** * Get the error information list for this exception. * * @return the unmodifiable error information list for this exception. */ public List<ErrorInformation> getErrorInformation() { return this.errorInformationList; } }
Don't need to create a new ArrayList here.
public BoundingBox(final List<Point> points) { if (points == null) { this.points = null; } else { this.points = Collections.unmodifiableList(new ArrayList<>(points)); } }
this.points = Collections.unmodifiableList(new ArrayList<>(points));
public BoundingBox(final List<Point> points) { if (points == null) { this.points = null; } else { this.points = Collections.unmodifiableList(points); } }
class BoundingBox { /** * The list of coordinates of the Bounding box. */ private final List<Point> points; /** * Constructs a Bounding box object. * * @param points The list of coordinates of the Bounding box. */ /** * Gets the list of all point coordinates of the bounding box. * * @return The unmodifiable list of all point coordinates of the Bounding box. */ public List<Point> getPoints() { return this.points; } }
class BoundingBox { /** * The list of coordinates of the Bounding box. */ private final List<Point> points; /** * Constructs a Bounding box object. * * @param points The list of coordinates of the Bounding box. */ /** * Gets the list of all point coordinates of the bounding box. * * @return The unmodifiable list of all point coordinates of the Bounding box. */ public List<Point> getPoints() { return this.points; } }
Not sure whether it may be `http` or not. I think it better to extract `host` from `url` first.
private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils.constructResourceId(this.manager().subscriptionId(), resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger.logExceptionAsError( new InvalidParameterException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } }
vhdUrl.split("\\.")[0].replace("https:
private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils.constructResourceId(this.manager().subscriptionId(), resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger.logExceptionAsError( new InvalidParameterException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public DiskSkuTypes sku() { if (this.inner().sku() == null || this.inner().sku().name() == null) { return null; } else { return DiskSkuTypes .fromStorageAccountType(DiskStorageAccountTypes.fromString(this.inner().sku().name().toString())); } } @Override public SnapshotSkuType skuType() { if (this.inner().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.inner().sku()); } } @Override public DiskCreateOption creationMethod() { return this.inner().creationData().createOption(); } @Override public boolean incremental() { return this.inner().incremental(); } @Override public int sizeInGB() { return Utils.toPrimitiveInt(this.inner().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.inner().osType(); } @Override public CreationSource source() { return new CreationSource(this.inner().creationData()); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .inner() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .onErrorResume(e -> Mono.empty()) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().inner().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .inner() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .inner() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } this.withSku(sourceDisk.sku()); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .inner() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.sku()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .inner() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .inner() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } this.withSku(sourceDisk.sku()); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .inner() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.sku()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .inner() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .inner() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .inner() .withCreationData(new CreationData()) 
.creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()).withSku(managedDisk.sku()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.inner().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.inner().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.inner().withOsType(osType); return this; } @Override public SnapshotImpl withSku(DiskSkuTypes sku) { SnapshotSku snapshotSku = new SnapshotSku(); snapshotSku.withName(SnapshotStorageAccountTypes.fromString(sku.accountType().toString())); this.inner().withSku(snapshotSku); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.inner().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .inner() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this.manager().inner().getSnapshots().getByResourceGroupAsync(this.resourceGroupName(), this.name()); } }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public DiskSkuTypes sku() { if (this.inner().sku() == null || this.inner().sku().name() == null) { return null; } else { return DiskSkuTypes .fromStorageAccountType(DiskStorageAccountTypes.fromString(this.inner().sku().name().toString())); } } @Override public SnapshotSkuType skuType() { if (this.inner().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.inner().sku()); } } @Override public DiskCreateOption creationMethod() { return this.inner().creationData().createOption(); } @Override public boolean incremental() { return this.inner().incremental(); } @Override public int sizeInGB() { return Utils.toPrimitiveInt(this.inner().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.inner().osType(); } @Override public CreationSource source() { return new CreationSource(this.inner().creationData()); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .inner() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .onErrorResume(e -> Mono.empty()) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().inner().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .inner() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .inner() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } this.withSku(sourceDisk.sku()); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .inner() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.sku()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .inner() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .inner() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } this.withSku(sourceDisk.sku()); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .inner() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.sku()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .inner() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .inner() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .inner() .withCreationData(new CreationData()) 
.creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()).withSku(managedDisk.sku()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.inner().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.inner().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.inner().withOsType(osType); return this; } @Override public SnapshotImpl withSku(DiskSkuTypes sku) { SnapshotSku snapshotSku = new SnapshotSku(); snapshotSku.withName(SnapshotStorageAccountTypes.fromString(sku.accountType().toString())); this.inner().withSku(snapshotSku); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.inner().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .inner() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this.manager().inner().getSnapshots().getByResourceGroupAsync(this.resourceGroupName(), this.name()); } }
Check whether to use `File.deleteOnExit` ?
private File compressSource(File sourceFolder) throws IOException { File compressFile = File.createTempFile("java_package", "tar.gz"); try (TarArchiveOutputStream tarArchiveOutputStream = new TarArchiveOutputStream( new GZIPOutputStream(new FileOutputStream(compressFile)))) { tarArchiveOutputStream.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU); for (Path sourceFile : Files.walk(sourceFolder.toPath()).collect(Collectors.toList())) { String relativePath = sourceFolder.toPath().relativize(sourceFile).toString(); TarArchiveEntry entry = new TarArchiveEntry(sourceFile.toFile(), relativePath); if (sourceFile.toFile().isFile()) { try (InputStream inputStream = new FileInputStream(sourceFile.toFile())) { tarArchiveOutputStream.putArchiveEntry(entry); IOUtils.copy(inputStream, tarArchiveOutputStream); tarArchiveOutputStream.closeArchiveEntry(); } } else { tarArchiveOutputStream.putArchiveEntry(entry); tarArchiveOutputStream.closeArchiveEntry(); } } } return compressFile; }
File compressFile = File.createTempFile("java_package", "tar.gz");
private File compressSource(File sourceFolder) throws IOException { File compressFile = File.createTempFile("java_package", "tar.gz"); compressFile.deleteOnExit(); try (TarArchiveOutputStream tarArchiveOutputStream = new TarArchiveOutputStream( new GZIPOutputStream(new FileOutputStream(compressFile)))) { tarArchiveOutputStream.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU); for (Path sourceFile : Files.walk(sourceFolder.toPath()).collect(Collectors.toList())) { String relativePath = sourceFolder.toPath().relativize(sourceFile).toString(); TarArchiveEntry entry = new TarArchiveEntry(sourceFile.toFile(), relativePath); if (sourceFile.toFile().isFile()) { try (InputStream inputStream = new FileInputStream(sourceFile.toFile())) { tarArchiveOutputStream.putArchiveEntry(entry); IOUtils.copy(inputStream, tarArchiveOutputStream); tarArchiveOutputStream.closeArchiveEntry(); } } else { tarArchiveOutputStream.putArchiveEntry(entry); tarArchiveOutputStream.closeArchiveEntry(); } } } return compressFile; }
class SpringAppDeploymentImpl extends ExternalChildResourceImpl<SpringAppDeployment, DeploymentResourceInner, SpringAppImpl, SpringApp> implements SpringAppDeployment, SpringAppDeployment.Definition, SpringAppDeployment.Update { private static final int BLOCK_SIZE = 4 * 1024 * 1024; private final SpringAppDeploymentsImpl client; private SpringAppDeployment originalDeployment; SpringAppDeploymentImpl(String name, SpringAppImpl parent, DeploymentResourceInner innerObject, SpringAppDeploymentsImpl client) { super(name, parent, innerObject); this.client = client; } @Override public String appName() { if (inner().properties() == null) { return null; } return inner().properties().appName(); } @Override public DeploymentSettings settings() { if (inner().properties() == null) { return null; } return inner().properties().deploymentSettings(); } @Override public DeploymentResourceStatus status() { if (inner().properties() == null) { return null; } return inner().properties().status(); } @Override public boolean isActive() { if (inner().properties() == null) { return false; } return inner().properties().active(); } @Override public OffsetDateTime createdTime() { if (inner().properties() == null) { return null; } return inner().properties().createdTime(); } @Override public List<DeploymentInstance> instances() { if (inner().properties() == null) { return null; } return inner().properties().instances(); } @Override public String getLogFileUrl() { return getLogFileUrlAsync().block(); } @Override public Mono<String> getLogFileUrlAsync() { return manager().inner().getDeployments().getLogFileUrlAsync( parent().parent().resourceGroupName(), parent().parent().name(), parent().name(), name() ) .map(LogFileUrlResponseInner::url); } private void ensureDeploySettings() { if (inner().properties() == null) { inner().withProperties(new DeploymentResourceProperties()); } if (inner().properties().deploymentSettings() == null) { inner().properties().withDeploymentSettings(new 
DeploymentSettings()); } } private void ensureSource() { if (inner().properties() == null) { inner().withProperties(new DeploymentResourceProperties()); } if (inner().properties().source() == null) { inner().properties().withSource(new UserSourceInfo()); } } private Mono<ShareFileAsyncClient> createShareFileAsyncClient(ResourceUploadDefinition option, long maxSize) { ShareFileAsyncClient shareFileAsyncClient = new ShareFileClientBuilder() .endpoint(option.uploadUrl()) .httpClient(manager().httpPipeline().getHttpClient()) .buildFileAsyncClient(); return shareFileAsyncClient.create(maxSize) .then(Mono.just(shareFileAsyncClient)); } private Mono<ShareFileAsyncClient> uploadToStorage(byte[] bytes, ResourceUploadDefinition option) { inner().properties().source().withRelativePath(option.relativePath()); return createShareFileAsyncClient(option, bytes.length) .flatMap(shareFileAsyncClient -> { List<Integer> blockList = new ArrayList<>(); for (int start = 0; start < bytes.length; start += BLOCK_SIZE) { blockList.add(start); } return Flux.fromIterable(blockList) .flatMap(start -> { int length = Math.min(bytes.length - start, BLOCK_SIZE); return shareFileAsyncClient.uploadWithResponse( Flux.just(ByteBuffer.wrap(bytes, start, length)), length, (long) start); }) .then(Mono.just(shareFileAsyncClient)); }); } private Mono<ShareFileAsyncClient> uploadToStorage(File source, ResourceUploadDefinition option) { inner().properties().source().withRelativePath(option.relativePath()); try { return createShareFileAsyncClient(option, source.length()) .flatMap(shareFileAsyncClient -> shareFileAsyncClient.uploadFromFile(source.getAbsolutePath()) .then(Mono.just(shareFileAsyncClient))); } catch (Exception e) { return Mono.error(e); } } @Override public SpringAppDeploymentImpl withJarPath(File jar) { ensureSource(); inner().properties().source().withType(UserSourceType.JAR); this.addDependency( context -> parent().getResourceUploadUrlAsync() .flatMap(option -> uploadToStorage(jar, option) 
.then(context.voidMono())) ); return this; } @Override public SpringAppDeploymentImpl withJarFile(byte[] jar) { ensureSource(); inner().properties().source().withType(UserSourceType.JAR); this.addDependency( context -> parent().getResourceUploadUrlAsync() .flatMap(option -> uploadToStorage(jar, option) .then(context.voidMono())) ); return this; } @Override public SpringAppDeploymentImpl withSourceCodeFolder(File sourceCode) { ensureSource(); inner().properties().source().withType(UserSourceType.SOURCE); this.addDependency( context -> parent().getResourceUploadUrlAsync() .flatMap(option -> { try { return uploadToStorage(compressSource(sourceCode), option); } catch (Exception e) { return Mono.error(e); } }) .then(context.voidMono()) ); return this; } @Override public SpringAppDeploymentImpl withExistingSource(UserSourceType type, String relativePath) { ensureSource(); inner().properties().source().withType(type); inner().properties().source().withRelativePath(relativePath); return this; } @Override public SpringAppDeploymentImpl withSourceCodeTarGzFile(File sourceCodeTarGz) { ensureSource(); inner().properties().source().withType(UserSourceType.SOURCE); this.addDependency( context -> parent().getResourceUploadUrlAsync() .flatMap(option -> uploadToStorage(sourceCodeTarGz, option) .then(context.voidMono())) ); return this; } @Override public SpringAppDeploymentImpl withTargetModule(String moduleName) { ensureSource(); inner().properties().source().withArtifactSelector(moduleName); return this; } @Override public SpringAppDeploymentImpl withSingleModule() { ensureSource(); inner().properties().source().withArtifactSelector(null); return this; } @Override public SpringAppDeploymentImpl withCurrentActiveSetting() { this.addDependency( context -> client.getByNameAsync(parent().activeDeployment()) .map(deployment -> { originalDeployment = deployment; return (Indexable) deployment; }) ); return this; } @Override public SpringAppDeploymentImpl 
withSettingsFromDeployment(SpringAppDeployment deployment) { originalDeployment = deployment; return this; } @Override public SpringAppDeploymentImpl withSettingsFromDeployment(String deploymentName) { this.addDependency( context -> client.getByNameAsync(deploymentName) .map(deployment -> { originalDeployment = deployment; return (Indexable) deployment; }) ); return this; } @Override public SpringAppDeploymentImpl withCustomSetting() { ensureDeploySettings(); inner().properties().withDeploymentSettings(new DeploymentSettings()); return this; } @Override public SpringAppDeploymentImpl withInstance(int count) { ensureDeploySettings(); inner().properties().deploymentSettings().withInstanceCount(count); return this; } @Override public SpringAppDeploymentImpl withCpu(int cpuCount) { ensureDeploySettings(); inner().properties().deploymentSettings().withCpu(cpuCount); return this; } @Override public SpringAppDeploymentImpl withMemory(int sizeInGB) { ensureDeploySettings(); inner().properties().deploymentSettings().withMemoryInGB(sizeInGB); return this; } @Override public SpringAppDeploymentImpl withRuntime(RuntimeVersion version) { ensureDeploySettings(); inner().properties().deploymentSettings().withRuntimeVersion(version); return this; } @Override public SpringAppDeploymentImpl withJvmOptions(String jvmOptions) { ensureDeploySettings(); inner().properties().deploymentSettings().withJvmOptions(jvmOptions); return this; } private void ensureEnvironments() { ensureDeploySettings(); if (inner().properties().deploymentSettings().environmentVariables() == null) { inner().properties().deploymentSettings().withEnvironmentVariables(new HashMap<>()); } } @Override public SpringAppDeploymentImpl withEnvironment(String key, String value) { ensureEnvironments(); inner().properties().deploymentSettings().environmentVariables().put(key, value); return this; } @Override public SpringAppDeploymentImpl withoutEnvironment(String key) { ensureEnvironments(); 
inner().properties().deploymentSettings().environmentVariables().remove(key); return this; } @Override public SpringAppDeploymentImpl withVersionName(String versionName) { ensureSource(); inner().properties().source().withVersion(versionName); return this; } @Override public SpringAppDeploymentImpl activate() { this.addPostRunDependent( context -> parent().update().withActiveDeployment(name()).applyAsync() .map(app -> (Indexable) app) ); return this; } @Override public Mono<SpringAppDeployment> createResourceAsync() { if (originalDeployment != null) { ensureDeploySettings(); inner().properties().withDeploymentSettings(originalDeployment.settings()); } return manager().inner().getDeployments().createOrUpdateAsync( parent().parent().resourceGroupName(), parent().parent().name(), parent().name(), name(), inner().properties() ) .map(inner -> { originalDeployment = null; setInner(inner); return this; }); } @Override public Mono<SpringAppDeployment> updateResourceAsync() { if (originalDeployment != null) { ensureDeploySettings(); inner().properties().withDeploymentSettings(originalDeployment.settings()); } return manager().inner().getDeployments().updateAsync( parent().parent().resourceGroupName(), parent().parent().name(), parent().name(), name(), inner().properties() ) .map(inner -> { originalDeployment = null; setInner(inner); return this; }); } @Override public Mono<Void> deleteResourceAsync() { return manager().inner().getDeployments().deleteAsync( parent().parent().resourceGroupName(), parent().parent().name(), parent().name(), name() ); } @Override protected Mono<DeploymentResourceInner> getInnerAsync() { return manager().inner().getDeployments().getAsync( parent().parent().resourceGroupName(), parent().parent().name(), parent().name(), name() ); } @Override public String id() { return inner().id(); } @Override public SpringAppDeploymentImpl update() { prepareUpdate(); return this; } private AppPlatformManager manager() { return parent().manager(); } }
class SpringAppDeploymentImpl extends ExternalChildResourceImpl<SpringAppDeployment, DeploymentResourceInner, SpringAppImpl, SpringApp> implements SpringAppDeployment, SpringAppDeployment.Definition, SpringAppDeployment.Update { private static final int BLOCK_SIZE = 4 * 1024 * 1024; private final SpringAppDeploymentsImpl client; private DeploymentSettings originalDeploymentSettings; SpringAppDeploymentImpl(String name, SpringAppImpl parent, DeploymentResourceInner innerObject, SpringAppDeploymentsImpl client) { super(name, parent, innerObject); this.client = client; } @Override public String appName() { if (inner().properties() == null) { return null; } return inner().properties().appName(); } @Override public DeploymentSettings settings() { if (inner().properties() == null) { return null; } return inner().properties().deploymentSettings(); } @Override public DeploymentResourceStatus status() { if (inner().properties() == null) { return null; } return inner().properties().status(); } @Override public boolean isActive() { if (inner().properties() == null) { return false; } return inner().properties().active(); } @Override public OffsetDateTime createdTime() { if (inner().properties() == null) { return null; } return inner().properties().createdTime(); } @Override public List<DeploymentInstance> instances() { if (inner().properties() == null) { return null; } return inner().properties().instances(); } @Override public String getLogFileUrl() { return getLogFileUrlAsync().block(); } @Override public Mono<String> getLogFileUrlAsync() { return manager().inner().getDeployments().getLogFileUrlAsync( parent().parent().resourceGroupName(), parent().parent().name(), parent().name(), name() ) .map(LogFileUrlResponseInner::url); } private void ensureDeploySettings() { if (inner().properties() == null) { inner().withProperties(new DeploymentResourceProperties()); } if (inner().properties().deploymentSettings() == null) { inner().properties().withDeploymentSettings(new 
DeploymentSettings()); } } private void ensureSource() { if (inner().properties() == null) { inner().withProperties(new DeploymentResourceProperties()); } if (inner().properties().source() == null) { inner().properties().withSource(new UserSourceInfo()); } } private ShareFileAsyncClient createShareFileAsyncClient(ResourceUploadDefinition option) { return new ShareFileClientBuilder() .endpoint(option.uploadUrl()) .httpClient(manager().httpPipeline().getHttpClient()) .buildFileAsyncClient(); } private Mono<Void> uploadToStorage(File source, ResourceUploadDefinition option) { inner().properties().source().withRelativePath(option.relativePath()); try { ShareFileAsyncClient shareFileAsyncClient = createShareFileAsyncClient(option); return shareFileAsyncClient.create(source.length()) .flatMap(fileInfo -> shareFileAsyncClient.uploadFromFile(source.getAbsolutePath())) .then(Mono.empty()); } catch (Exception e) { return Mono.error(e); } } @Override public SpringAppDeploymentImpl withJarFile(File jar) { ensureSource(); inner().properties().source().withType(UserSourceType.JAR); this.addDependency( context -> parent().getResourceUploadUrlAsync() .flatMap(option -> uploadToStorage(jar, option) .then(context.voidMono())) ); return this; } @Override public SpringAppDeploymentImpl withSourceCodeFolder(File sourceCodeFolder) { ensureSource(); inner().properties().source().withType(UserSourceType.SOURCE); this.addDependency( context -> parent().getResourceUploadUrlAsync() .flatMap(option -> { try { return uploadToStorage(compressSource(sourceCodeFolder), option); } catch (Exception e) { return Mono.error(e); } }) .then(context.voidMono()) ); return this; } @Override public SpringAppDeploymentImpl withExistingSource(UserSourceType type, String relativePath) { ensureSource(); inner().properties().source().withType(type); inner().properties().source().withRelativePath(relativePath); return this; } @Override public SpringAppDeploymentImpl withSourceCodeTarGzFile(File sourceCodeTarGz) { 
ensureSource(); inner().properties().source().withType(UserSourceType.SOURCE); this.addDependency( context -> parent().getResourceUploadUrlAsync() .flatMap(option -> uploadToStorage(sourceCodeTarGz, option) .then(context.voidMono())) ); return this; } @Override public SpringAppDeploymentImpl withTargetModule(String moduleName) { ensureSource(); inner().properties().source().withArtifactSelector(moduleName); return this; } @Override public SpringAppDeploymentImpl withSingleModule() { ensureSource(); inner().properties().source().withArtifactSelector(null); return this; } @Override public SpringAppDeploymentImpl withSettingsFromActiveDeployment() { this.addDependency( context -> client.getByNameAsync(parent().activeDeployment()) .map(deployment -> { originalDeploymentSettings = deployment.settings(); return (Indexable) deployment; }) ); return this; } @Override public SpringAppDeploymentImpl withSettingsFromDeployment(SpringAppDeployment deployment) { originalDeploymentSettings = deployment.settings(); return this; } @Override public SpringAppDeploymentImpl withSettingsFromDeployment(String deploymentName) { this.addDependency( context -> client.getByNameAsync(deploymentName) .map(deployment -> { originalDeploymentSettings = deployment.settings(); return (Indexable) deployment; }) ); return this; } @Override public SpringAppDeploymentImpl withCustomSetting() { ensureDeploySettings(); inner().properties().withDeploymentSettings(new DeploymentSettings()); return this; } @Override public SpringAppDeploymentImpl withInstance(int count) { ensureDeploySettings(); inner().properties().deploymentSettings().withInstanceCount(count); return this; } @Override public SpringAppDeploymentImpl withCpu(int cpuCount) { ensureDeploySettings(); inner().properties().deploymentSettings().withCpu(cpuCount); return this; } @Override public SpringAppDeploymentImpl withMemory(int sizeInGB) { ensureDeploySettings(); inner().properties().deploymentSettings().withMemoryInGB(sizeInGB); return 
this; } @Override public SpringAppDeploymentImpl withRuntime(RuntimeVersion version) { ensureDeploySettings(); inner().properties().deploymentSettings().withRuntimeVersion(version); return this; } @Override public SpringAppDeploymentImpl withJvmOptions(String jvmOptions) { ensureDeploySettings(); inner().properties().deploymentSettings().withJvmOptions(jvmOptions); return this; } private void ensureEnvironments() { ensureDeploySettings(); if (inner().properties().deploymentSettings().environmentVariables() == null) { inner().properties().deploymentSettings().withEnvironmentVariables(new HashMap<>()); } } @Override public SpringAppDeploymentImpl withEnvironment(String key, String value) { ensureEnvironments(); inner().properties().deploymentSettings().environmentVariables().put(key, value); return this; } @Override public SpringAppDeploymentImpl withoutEnvironment(String key) { ensureEnvironments(); inner().properties().deploymentSettings().environmentVariables().remove(key); return this; } @Override public SpringAppDeploymentImpl withVersionName(String versionName) { ensureSource(); inner().properties().source().withVersion(versionName); return this; } @Override public SpringAppDeploymentImpl withActivation() { this.addPostRunDependent( context -> parent().update().withActiveDeployment(name()).applyAsync() .map(app -> (Indexable) app) ); return this; } @Override public Mono<SpringAppDeployment> createResourceAsync() { if (originalDeploymentSettings != null) { ensureDeploySettings(); inner().properties().withDeploymentSettings(originalDeploymentSettings); } return manager().inner().getDeployments().createOrUpdateAsync( parent().parent().resourceGroupName(), parent().parent().name(), parent().name(), name(), inner().properties() ) .map(inner -> { originalDeploymentSettings = null; setInner(inner); return this; }); } @Override public Mono<SpringAppDeployment> updateResourceAsync() { if (originalDeploymentSettings != null) { ensureDeploySettings(); 
inner().properties().withDeploymentSettings(originalDeploymentSettings); } return manager().inner().getDeployments().updateAsync( parent().parent().resourceGroupName(), parent().parent().name(), parent().name(), name(), inner().properties() ) .map(inner -> { originalDeploymentSettings = null; setInner(inner); return this; }); } @Override public Mono<Void> deleteResourceAsync() { return manager().inner().getDeployments().deleteAsync( parent().parent().resourceGroupName(), parent().parent().name(), parent().name(), name() ); } @Override protected Mono<DeploymentResourceInner> getInnerAsync() { return manager().inner().getDeployments().getAsync( parent().parent().resourceGroupName(), parent().parent().name(), parent().name(), name() ); } @Override public String id() { return inner().id(); } @Override public SpringAppDeploymentImpl update() { prepareUpdate(); return this; } private AppPlatformManager manager() { return parent().manager(); } }
This method is only used once. Why don't we roll in the functionality.
void singleUnnamedSession(MessagingEntityType entityType) { final int entityIndex = 0; final String messageId = "singleUnnamedSession"; final String sessionId = "singleUnnamedSession-" + Instant.now().toString(); final String contents = "Some-contents"; final int numberToSend = 5; final List<String> lockTokens = new ArrayList<>(); setSenderAndReceiver(entityType, entityIndex, TIMEOUT, builder -> builder.maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(numberToSend) .flatMap(index -> { final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(sessionId); messagesPending.incrementAndGet(); return sender.send(message).thenReturn(index); }).subscribe( number -> logger.info("sessionId[{}] sent[{}] Message sent.", sessionId, number), error -> logger.error("sessionId[{}] Error encountered.", sessionId, error), () -> logger.info("sessionId[{}] Finished sending.", sessionId)); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); completeMessages(receiver, lockTokens, sessionId); } }
completeMessages(receiver, lockTokens, sessionId);
void singleUnnamedSession(MessagingEntityType entityType) { final int entityIndex = TestUtils.USE_CASE_SINGLE_SESSION; final String messageId = "singleUnnamedSession"; final String sessionId = "singleUnnamedSession-" + Instant.now().toString(); final String contents = "Some-contents"; final int numberToSend = 5; final List<String> lockTokens = new ArrayList<>(); setSenderAndReceiver(entityType, entityIndex, TIMEOUT, builder -> builder.maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(numberToSend) .flatMap(index -> { final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(sessionId); messagesPending.incrementAndGet(); return sender.send(message).thenReturn(index); }).subscribe( number -> logger.info("sessionId[{}] sent[{}] Message sent.", sessionId, number), error -> logger.error("sessionId[{}] Error encountered.", sessionId, error), () -> logger.info("sessionId[{}] Finished sending.", sessionId)); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); Mono.when(lockTokens.stream().map(e -> receiver.complete(MessageLockToken.fromString(e), sessionId)) .collect(Collectors.toList())) .block(TIMEOUT); } }
class UnnamedSessionManagerIntegrationTest extends IntegrationTestBase { private final AtomicInteger messagesPending = new AtomicInteger(); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; UnnamedSessionManagerIntegrationTest() { super(new ClientLogger(UnnamedSessionManagerIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { final int pending = messagesPending.get(); logger.info("Pending messages: {}", pending); } @ParameterizedTest @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase /** * Verifies that we can roll over to a next session. */ @Test void multipleSessions() { final int entityIndex = TestUtils.USE_CASE_MULTIPLE_SESSION; final String messageId = "singleUnnamedSession"; final String now = Instant.now().toString(); final List<String> sessionIds = IntStream.range(0, 3) .mapToObj(number -> String.join("-", String.valueOf(number), "singleUnnamedSession", now)) .collect(Collectors.toList()); logger.info("------ Session ids ------"); for (int i = 0; i < sessionIds.size(); i++) { logger.info("[{}]: {}", i, sessionIds.get(i)); } final String contents = "Some-contents"; final int numberToSend = 3; final int maxMessages = numberToSend * sessionIds.size(); final int maxConcurrency = 2; final Set<String> set = new HashSet<>(); setSenderAndReceiver(MessagingEntityType.SUBSCRIPTION, entityIndex, Duration.ofSeconds(20), builder -> builder.maxConcurrentSessions(maxConcurrency).maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(maxMessages) .flatMap(index -> { final int i = (int) (index % sessionIds.size()); final String id = sessionIds.get(i); final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(id); messagesPending.incrementAndGet(); return sender.send(message).thenReturn( String.format("sessionId[%s] 
sent[%s] Message sent.", id, index)); }).subscribe( message -> logger.info(message), error -> logger.error("Error encountered.", error), () -> logger.info("Finished sending.")); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); } } private void assertFromSession(List<String> sessionIds, Set<String> currentSessions, int maxSize, String messageId, String contents, ServiceBusReceivedMessageContext context) { logger.info("Verifying message: {}", context.getSessionId()); assertNotNull(context.getSessionId()); assertTrue(sessionIds.contains(context.getSessionId())); if (currentSessions.add(context.getSessionId())) { logger.info("Adding sessionId: {}", context.getSessionId()); } assertTrue(currentSessions.size() <= maxSize, String.format( "Current size (%s) is larger than max (%s).", currentSessions.size(), maxSize)); assertMessageEquals(null, messageId, contents, context); } /** * Sets the sender and receiver. 
If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, Duration operationTimeout, Function<ServiceBusSessionReceiverClientBuilder, ServiceBusSessionReceiverClientBuilder> onBuild) { this.sender = getSenderBuilder(false, entityType, entityIndex, true, false) .buildAsyncClient(); ServiceBusSessionReceiverClientBuilder sessionBuilder = getSessionReceiverBuilder(false, entityType, entityIndex, builder -> builder.retryOptions(new AmqpRetryOptions().setTryTimeout(operationTimeout)), false); this.receiver = onBuild.apply(sessionBuilder).buildAsyncClient(); } private static void assertMessageEquals(String sessionId, String messageId, String contents, ServiceBusReceivedMessageContext actual) { ServiceBusReceivedMessage message = actual.getMessage(); assertNotNull(message, "'message' should not be null. Error? " + actual.getThrowable()); if (!CoreUtils.isNullOrEmpty(sessionId)) { assertEquals(sessionId, message.getSessionId()); } assertEquals(messageId, message.getMessageId()); assertEquals(contents, new String(message.getBody(), StandardCharsets.UTF_8)); assertNull(actual.getThrowable()); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<String> lockTokens, String sessionId) { Mono.when(lockTokens.stream().map(e -> client.complete(MessageLockToken.fromString(e), sessionId)) .collect(Collectors.toList())) .block(TIMEOUT); return lockTokens.size(); } }
class UnnamedSessionManagerIntegrationTest extends IntegrationTestBase { private final AtomicInteger messagesPending = new AtomicInteger(); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; UnnamedSessionManagerIntegrationTest() { super(new ClientLogger(UnnamedSessionManagerIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { final int pending = messagesPending.get(); logger.info("Pending messages: {}", pending); } @ParameterizedTest @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase /** * Verifies that we can roll over to a next session. */ @Test void multipleSessions() { final int entityIndex = TestUtils.USE_CASE_MULTIPLE_SESSION; final String messageId = "singleUnnamedSession"; final String now = Instant.now().toString(); final List<String> sessionIds = IntStream.range(0, 3) .mapToObj(number -> String.join("-", String.valueOf(number), "singleUnnamedSession", now)) .collect(Collectors.toList()); logger.info("------ Session ids ------"); for (int i = 0; i < sessionIds.size(); i++) { logger.info("[{}]: {}", i, sessionIds.get(i)); } final String contents = "Some-contents"; final int numberToSend = 3; final int maxMessages = numberToSend * sessionIds.size(); final int maxConcurrency = 2; final Set<String> set = new HashSet<>(); setSenderAndReceiver(MessagingEntityType.SUBSCRIPTION, entityIndex, Duration.ofSeconds(20), builder -> builder.maxConcurrentSessions(maxConcurrency).maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(maxMessages) .flatMap(index -> { final int i = (int) (index % sessionIds.size()); final String id = sessionIds.get(i); final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(id); messagesPending.incrementAndGet(); return sender.send(message).thenReturn( String.format("sessionId[%s] 
sent[%s] Message sent.", id, index)); }).subscribe( message -> logger.info(message), error -> logger.error("Error encountered.", error), () -> logger.info("Finished sending.")); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); } } private void assertFromSession(List<String> sessionIds, Set<String> currentSessions, int maxSize, String messageId, String contents, ServiceBusReceivedMessageContext context) { logger.info("Verifying message: {}", context.getSessionId()); assertNotNull(context.getSessionId()); assertTrue(sessionIds.contains(context.getSessionId())); if (currentSessions.add(context.getSessionId())) { logger.info("Adding sessionId: {}", context.getSessionId()); } assertTrue(currentSessions.size() <= maxSize, String.format( "Current size (%s) is larger than max (%s).", currentSessions.size(), maxSize)); assertMessageEquals(null, messageId, contents, context); } /** * Sets the sender and receiver. 
If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, Duration operationTimeout, Function<ServiceBusSessionReceiverClientBuilder, ServiceBusSessionReceiverClientBuilder> onBuild) { this.sender = getSenderBuilder(false, entityType, entityIndex, true, false) .buildAsyncClient(); ServiceBusSessionReceiverClientBuilder sessionBuilder = getSessionReceiverBuilder(false, entityType, entityIndex, builder -> builder.retryOptions(new AmqpRetryOptions().setTryTimeout(operationTimeout)), false); this.receiver = onBuild.apply(sessionBuilder).buildAsyncClient(); } private static void assertMessageEquals(String sessionId, String messageId, String contents, ServiceBusReceivedMessageContext actual) { ServiceBusReceivedMessage message = actual.getMessage(); assertNotNull(message, "'message' should not be null. Error? " + actual.getThrowable()); if (!CoreUtils.isNullOrEmpty(sessionId)) { assertEquals(sessionId, message.getSessionId()); } assertEquals(messageId, message.getMessageId()); assertEquals(contents, new String(message.getBody(), StandardCharsets.UTF_8)); assertNull(actual.getThrowable()); } }
I think, it is cleaner and improve readability.
void singleUnnamedSession(MessagingEntityType entityType) { final int entityIndex = 0; final String messageId = "singleUnnamedSession"; final String sessionId = "singleUnnamedSession-" + Instant.now().toString(); final String contents = "Some-contents"; final int numberToSend = 5; final List<String> lockTokens = new ArrayList<>(); setSenderAndReceiver(entityType, entityIndex, TIMEOUT, builder -> builder.maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(numberToSend) .flatMap(index -> { final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(sessionId); messagesPending.incrementAndGet(); return sender.send(message).thenReturn(index); }).subscribe( number -> logger.info("sessionId[{}] sent[{}] Message sent.", sessionId, number), error -> logger.error("sessionId[{}] Error encountered.", sessionId, error), () -> logger.info("sessionId[{}] Finished sending.", sessionId)); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); completeMessages(receiver, lockTokens, sessionId); } }
completeMessages(receiver, lockTokens, sessionId);
void singleUnnamedSession(MessagingEntityType entityType) { final int entityIndex = TestUtils.USE_CASE_SINGLE_SESSION; final String messageId = "singleUnnamedSession"; final String sessionId = "singleUnnamedSession-" + Instant.now().toString(); final String contents = "Some-contents"; final int numberToSend = 5; final List<String> lockTokens = new ArrayList<>(); setSenderAndReceiver(entityType, entityIndex, TIMEOUT, builder -> builder.maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(numberToSend) .flatMap(index -> { final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(sessionId); messagesPending.incrementAndGet(); return sender.send(message).thenReturn(index); }).subscribe( number -> logger.info("sessionId[{}] sent[{}] Message sent.", sessionId, number), error -> logger.error("sessionId[{}] Error encountered.", sessionId, error), () -> logger.info("sessionId[{}] Finished sending.", sessionId)); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); Mono.when(lockTokens.stream().map(e -> receiver.complete(MessageLockToken.fromString(e), sessionId)) .collect(Collectors.toList())) .block(TIMEOUT); } }
class UnnamedSessionManagerIntegrationTest extends IntegrationTestBase { private final AtomicInteger messagesPending = new AtomicInteger(); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; UnnamedSessionManagerIntegrationTest() { super(new ClientLogger(UnnamedSessionManagerIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { final int pending = messagesPending.get(); logger.info("Pending messages: {}", pending); } @ParameterizedTest @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase /** * Verifies that we can roll over to a next session. */ @Test void multipleSessions() { final int entityIndex = TestUtils.USE_CASE_MULTIPLE_SESSION; final String messageId = "singleUnnamedSession"; final String now = Instant.now().toString(); final List<String> sessionIds = IntStream.range(0, 3) .mapToObj(number -> String.join("-", String.valueOf(number), "singleUnnamedSession", now)) .collect(Collectors.toList()); logger.info("------ Session ids ------"); for (int i = 0; i < sessionIds.size(); i++) { logger.info("[{}]: {}", i, sessionIds.get(i)); } final String contents = "Some-contents"; final int numberToSend = 3; final int maxMessages = numberToSend * sessionIds.size(); final int maxConcurrency = 2; final Set<String> set = new HashSet<>(); setSenderAndReceiver(MessagingEntityType.SUBSCRIPTION, entityIndex, Duration.ofSeconds(20), builder -> builder.maxConcurrentSessions(maxConcurrency).maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(maxMessages) .flatMap(index -> { final int i = (int) (index % sessionIds.size()); final String id = sessionIds.get(i); final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(id); messagesPending.incrementAndGet(); return sender.send(message).thenReturn( String.format("sessionId[%s] 
sent[%s] Message sent.", id, index)); }).subscribe( message -> logger.info(message), error -> logger.error("Error encountered.", error), () -> logger.info("Finished sending.")); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); } } private void assertFromSession(List<String> sessionIds, Set<String> currentSessions, int maxSize, String messageId, String contents, ServiceBusReceivedMessageContext context) { logger.info("Verifying message: {}", context.getSessionId()); assertNotNull(context.getSessionId()); assertTrue(sessionIds.contains(context.getSessionId())); if (currentSessions.add(context.getSessionId())) { logger.info("Adding sessionId: {}", context.getSessionId()); } assertTrue(currentSessions.size() <= maxSize, String.format( "Current size (%s) is larger than max (%s).", currentSessions.size(), maxSize)); assertMessageEquals(null, messageId, contents, context); } /** * Sets the sender and receiver. 
If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, Duration operationTimeout, Function<ServiceBusSessionReceiverClientBuilder, ServiceBusSessionReceiverClientBuilder> onBuild) { this.sender = getSenderBuilder(false, entityType, entityIndex, true, false) .buildAsyncClient(); ServiceBusSessionReceiverClientBuilder sessionBuilder = getSessionReceiverBuilder(false, entityType, entityIndex, builder -> builder.retryOptions(new AmqpRetryOptions().setTryTimeout(operationTimeout)), false); this.receiver = onBuild.apply(sessionBuilder).buildAsyncClient(); } private static void assertMessageEquals(String sessionId, String messageId, String contents, ServiceBusReceivedMessageContext actual) { ServiceBusReceivedMessage message = actual.getMessage(); assertNotNull(message, "'message' should not be null. Error? " + actual.getThrowable()); if (!CoreUtils.isNullOrEmpty(sessionId)) { assertEquals(sessionId, message.getSessionId()); } assertEquals(messageId, message.getMessageId()); assertEquals(contents, new String(message.getBody(), StandardCharsets.UTF_8)); assertNull(actual.getThrowable()); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<String> lockTokens, String sessionId) { Mono.when(lockTokens.stream().map(e -> client.complete(MessageLockToken.fromString(e), sessionId)) .collect(Collectors.toList())) .block(TIMEOUT); return lockTokens.size(); } }
class UnnamedSessionManagerIntegrationTest extends IntegrationTestBase { private final AtomicInteger messagesPending = new AtomicInteger(); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; UnnamedSessionManagerIntegrationTest() { super(new ClientLogger(UnnamedSessionManagerIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { final int pending = messagesPending.get(); logger.info("Pending messages: {}", pending); } @ParameterizedTest @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase /** * Verifies that we can roll over to a next session. */ @Test void multipleSessions() { final int entityIndex = TestUtils.USE_CASE_MULTIPLE_SESSION; final String messageId = "singleUnnamedSession"; final String now = Instant.now().toString(); final List<String> sessionIds = IntStream.range(0, 3) .mapToObj(number -> String.join("-", String.valueOf(number), "singleUnnamedSession", now)) .collect(Collectors.toList()); logger.info("------ Session ids ------"); for (int i = 0; i < sessionIds.size(); i++) { logger.info("[{}]: {}", i, sessionIds.get(i)); } final String contents = "Some-contents"; final int numberToSend = 3; final int maxMessages = numberToSend * sessionIds.size(); final int maxConcurrency = 2; final Set<String> set = new HashSet<>(); setSenderAndReceiver(MessagingEntityType.SUBSCRIPTION, entityIndex, Duration.ofSeconds(20), builder -> builder.maxConcurrentSessions(maxConcurrency).maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(maxMessages) .flatMap(index -> { final int i = (int) (index % sessionIds.size()); final String id = sessionIds.get(i); final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(id); messagesPending.incrementAndGet(); return sender.send(message).thenReturn( String.format("sessionId[%s] 
sent[%s] Message sent.", id, index)); }).subscribe( message -> logger.info(message), error -> logger.error("Error encountered.", error), () -> logger.info("Finished sending.")); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); } } private void assertFromSession(List<String> sessionIds, Set<String> currentSessions, int maxSize, String messageId, String contents, ServiceBusReceivedMessageContext context) { logger.info("Verifying message: {}", context.getSessionId()); assertNotNull(context.getSessionId()); assertTrue(sessionIds.contains(context.getSessionId())); if (currentSessions.add(context.getSessionId())) { logger.info("Adding sessionId: {}", context.getSessionId()); } assertTrue(currentSessions.size() <= maxSize, String.format( "Current size (%s) is larger than max (%s).", currentSessions.size(), maxSize)); assertMessageEquals(null, messageId, contents, context); } /** * Sets the sender and receiver. 
If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, Duration operationTimeout, Function<ServiceBusSessionReceiverClientBuilder, ServiceBusSessionReceiverClientBuilder> onBuild) { this.sender = getSenderBuilder(false, entityType, entityIndex, true, false) .buildAsyncClient(); ServiceBusSessionReceiverClientBuilder sessionBuilder = getSessionReceiverBuilder(false, entityType, entityIndex, builder -> builder.retryOptions(new AmqpRetryOptions().setTryTimeout(operationTimeout)), false); this.receiver = onBuild.apply(sessionBuilder).buildAsyncClient(); } private static void assertMessageEquals(String sessionId, String messageId, String contents, ServiceBusReceivedMessageContext actual) { ServiceBusReceivedMessage message = actual.getMessage(); assertNotNull(message, "'message' should not be null. Error? " + actual.getThrowable()); if (!CoreUtils.isNullOrEmpty(sessionId)) { assertEquals(sessionId, message.getSessionId()); } assertEquals(messageId, message.getMessageId()); assertEquals(contents, new String(message.getBody(), StandardCharsets.UTF_8)); assertNull(actual.getThrowable()); } }
I don't think so. Methods are reusable pieces of code.
void singleUnnamedSession(MessagingEntityType entityType) { final int entityIndex = 0; final String messageId = "singleUnnamedSession"; final String sessionId = "singleUnnamedSession-" + Instant.now().toString(); final String contents = "Some-contents"; final int numberToSend = 5; final List<String> lockTokens = new ArrayList<>(); setSenderAndReceiver(entityType, entityIndex, TIMEOUT, builder -> builder.maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(numberToSend) .flatMap(index -> { final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(sessionId); messagesPending.incrementAndGet(); return sender.send(message).thenReturn(index); }).subscribe( number -> logger.info("sessionId[{}] sent[{}] Message sent.", sessionId, number), error -> logger.error("sessionId[{}] Error encountered.", sessionId, error), () -> logger.info("sessionId[{}] Finished sending.", sessionId)); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); completeMessages(receiver, lockTokens, sessionId); } }
completeMessages(receiver, lockTokens, sessionId);
void singleUnnamedSession(MessagingEntityType entityType) { final int entityIndex = TestUtils.USE_CASE_SINGLE_SESSION; final String messageId = "singleUnnamedSession"; final String sessionId = "singleUnnamedSession-" + Instant.now().toString(); final String contents = "Some-contents"; final int numberToSend = 5; final List<String> lockTokens = new ArrayList<>(); setSenderAndReceiver(entityType, entityIndex, TIMEOUT, builder -> builder.maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(numberToSend) .flatMap(index -> { final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(sessionId); messagesPending.incrementAndGet(); return sender.send(message).thenReturn(index); }).subscribe( number -> logger.info("sessionId[{}] sent[{}] Message sent.", sessionId, number), error -> logger.error("sessionId[{}] Error encountered.", sessionId, error), () -> logger.info("sessionId[{}] Finished sending.", sessionId)); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); Mono.when(lockTokens.stream().map(e -> receiver.complete(MessageLockToken.fromString(e), sessionId)) .collect(Collectors.toList())) .block(TIMEOUT); } }
class UnnamedSessionManagerIntegrationTest extends IntegrationTestBase { private final AtomicInteger messagesPending = new AtomicInteger(); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; UnnamedSessionManagerIntegrationTest() { super(new ClientLogger(UnnamedSessionManagerIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { final int pending = messagesPending.get(); logger.info("Pending messages: {}", pending); } @ParameterizedTest @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase /** * Verifies that we can roll over to a next session. */ @Test void multipleSessions() { final int entityIndex = TestUtils.USE_CASE_MULTIPLE_SESSION; final String messageId = "singleUnnamedSession"; final String now = Instant.now().toString(); final List<String> sessionIds = IntStream.range(0, 3) .mapToObj(number -> String.join("-", String.valueOf(number), "singleUnnamedSession", now)) .collect(Collectors.toList()); logger.info("------ Session ids ------"); for (int i = 0; i < sessionIds.size(); i++) { logger.info("[{}]: {}", i, sessionIds.get(i)); } final String contents = "Some-contents"; final int numberToSend = 3; final int maxMessages = numberToSend * sessionIds.size(); final int maxConcurrency = 2; final Set<String> set = new HashSet<>(); setSenderAndReceiver(MessagingEntityType.SUBSCRIPTION, entityIndex, Duration.ofSeconds(20), builder -> builder.maxConcurrentSessions(maxConcurrency).maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(maxMessages) .flatMap(index -> { final int i = (int) (index % sessionIds.size()); final String id = sessionIds.get(i); final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(id); messagesPending.incrementAndGet(); return sender.send(message).thenReturn( String.format("sessionId[%s] 
sent[%s] Message sent.", id, index)); }).subscribe( message -> logger.info(message), error -> logger.error("Error encountered.", error), () -> logger.info("Finished sending.")); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); } } private void assertFromSession(List<String> sessionIds, Set<String> currentSessions, int maxSize, String messageId, String contents, ServiceBusReceivedMessageContext context) { logger.info("Verifying message: {}", context.getSessionId()); assertNotNull(context.getSessionId()); assertTrue(sessionIds.contains(context.getSessionId())); if (currentSessions.add(context.getSessionId())) { logger.info("Adding sessionId: {}", context.getSessionId()); } assertTrue(currentSessions.size() <= maxSize, String.format( "Current size (%s) is larger than max (%s).", currentSessions.size(), maxSize)); assertMessageEquals(null, messageId, contents, context); } /** * Sets the sender and receiver. 
If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, Duration operationTimeout, Function<ServiceBusSessionReceiverClientBuilder, ServiceBusSessionReceiverClientBuilder> onBuild) { this.sender = getSenderBuilder(false, entityType, entityIndex, true, false) .buildAsyncClient(); ServiceBusSessionReceiverClientBuilder sessionBuilder = getSessionReceiverBuilder(false, entityType, entityIndex, builder -> builder.retryOptions(new AmqpRetryOptions().setTryTimeout(operationTimeout)), false); this.receiver = onBuild.apply(sessionBuilder).buildAsyncClient(); } private static void assertMessageEquals(String sessionId, String messageId, String contents, ServiceBusReceivedMessageContext actual) { ServiceBusReceivedMessage message = actual.getMessage(); assertNotNull(message, "'message' should not be null. Error? " + actual.getThrowable()); if (!CoreUtils.isNullOrEmpty(sessionId)) { assertEquals(sessionId, message.getSessionId()); } assertEquals(messageId, message.getMessageId()); assertEquals(contents, new String(message.getBody(), StandardCharsets.UTF_8)); assertNull(actual.getThrowable()); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<String> lockTokens, String sessionId) { Mono.when(lockTokens.stream().map(e -> client.complete(MessageLockToken.fromString(e), sessionId)) .collect(Collectors.toList())) .block(TIMEOUT); return lockTokens.size(); } }
class UnnamedSessionManagerIntegrationTest extends IntegrationTestBase { private final AtomicInteger messagesPending = new AtomicInteger(); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; UnnamedSessionManagerIntegrationTest() { super(new ClientLogger(UnnamedSessionManagerIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { final int pending = messagesPending.get(); logger.info("Pending messages: {}", pending); } @ParameterizedTest @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase /** * Verifies that we can roll over to a next session. */ @Test void multipleSessions() { final int entityIndex = TestUtils.USE_CASE_MULTIPLE_SESSION; final String messageId = "singleUnnamedSession"; final String now = Instant.now().toString(); final List<String> sessionIds = IntStream.range(0, 3) .mapToObj(number -> String.join("-", String.valueOf(number), "singleUnnamedSession", now)) .collect(Collectors.toList()); logger.info("------ Session ids ------"); for (int i = 0; i < sessionIds.size(); i++) { logger.info("[{}]: {}", i, sessionIds.get(i)); } final String contents = "Some-contents"; final int numberToSend = 3; final int maxMessages = numberToSend * sessionIds.size(); final int maxConcurrency = 2; final Set<String> set = new HashSet<>(); setSenderAndReceiver(MessagingEntityType.SUBSCRIPTION, entityIndex, Duration.ofSeconds(20), builder -> builder.maxConcurrentSessions(maxConcurrency).maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(maxMessages) .flatMap(index -> { final int i = (int) (index % sessionIds.size()); final String id = sessionIds.get(i); final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(id); messagesPending.incrementAndGet(); return sender.send(message).thenReturn( String.format("sessionId[%s] 
sent[%s] Message sent.", id, index)); }).subscribe( message -> logger.info(message), error -> logger.error("Error encountered.", error), () -> logger.info("Finished sending.")); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); } } private void assertFromSession(List<String> sessionIds, Set<String> currentSessions, int maxSize, String messageId, String contents, ServiceBusReceivedMessageContext context) { logger.info("Verifying message: {}", context.getSessionId()); assertNotNull(context.getSessionId()); assertTrue(sessionIds.contains(context.getSessionId())); if (currentSessions.add(context.getSessionId())) { logger.info("Adding sessionId: {}", context.getSessionId()); } assertTrue(currentSessions.size() <= maxSize, String.format( "Current size (%s) is larger than max (%s).", currentSessions.size(), maxSize)); assertMessageEquals(null, messageId, contents, context); } /** * Sets the sender and receiver. 
If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, Duration operationTimeout, Function<ServiceBusSessionReceiverClientBuilder, ServiceBusSessionReceiverClientBuilder> onBuild) { this.sender = getSenderBuilder(false, entityType, entityIndex, true, false) .buildAsyncClient(); ServiceBusSessionReceiverClientBuilder sessionBuilder = getSessionReceiverBuilder(false, entityType, entityIndex, builder -> builder.retryOptions(new AmqpRetryOptions().setTryTimeout(operationTimeout)), false); this.receiver = onBuild.apply(sessionBuilder).buildAsyncClient(); } private static void assertMessageEquals(String sessionId, String messageId, String contents, ServiceBusReceivedMessageContext actual) { ServiceBusReceivedMessage message = actual.getMessage(); assertNotNull(message, "'message' should not be null. Error? " + actual.getThrowable()); if (!CoreUtils.isNullOrEmpty(sessionId)) { assertEquals(sessionId, message.getSessionId()); } assertEquals(messageId, message.getMessageId()); assertEquals(contents, new String(message.getBody(), StandardCharsets.UTF_8)); assertNull(actual.getThrowable()); } }
This is an extra 3 lines. or one statement. You don't even use the return value.
void singleUnnamedSession(MessagingEntityType entityType) { final int entityIndex = 0; final String messageId = "singleUnnamedSession"; final String sessionId = "singleUnnamedSession-" + Instant.now().toString(); final String contents = "Some-contents"; final int numberToSend = 5; final List<String> lockTokens = new ArrayList<>(); setSenderAndReceiver(entityType, entityIndex, TIMEOUT, builder -> builder.maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(numberToSend) .flatMap(index -> { final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(sessionId); messagesPending.incrementAndGet(); return sender.send(message).thenReturn(index); }).subscribe( number -> logger.info("sessionId[{}] sent[{}] Message sent.", sessionId, number), error -> logger.error("sessionId[{}] Error encountered.", sessionId, error), () -> logger.info("sessionId[{}] Finished sending.", sessionId)); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); completeMessages(receiver, lockTokens, sessionId); } }
completeMessages(receiver, lockTokens, sessionId);
void singleUnnamedSession(MessagingEntityType entityType) { final int entityIndex = TestUtils.USE_CASE_SINGLE_SESSION; final String messageId = "singleUnnamedSession"; final String sessionId = "singleUnnamedSession-" + Instant.now().toString(); final String contents = "Some-contents"; final int numberToSend = 5; final List<String> lockTokens = new ArrayList<>(); setSenderAndReceiver(entityType, entityIndex, TIMEOUT, builder -> builder.maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(numberToSend) .flatMap(index -> { final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(sessionId); messagesPending.incrementAndGet(); return sender.send(message).thenReturn(index); }).subscribe( number -> logger.info("sessionId[{}] sent[{}] Message sent.", sessionId, number), error -> logger.error("sessionId[{}] Error encountered.", sessionId, error), () -> logger.info("sessionId[{}] Finished sending.", sessionId)); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .assertNext(context -> assertMessageEquals(sessionId, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); Mono.when(lockTokens.stream().map(e -> receiver.complete(MessageLockToken.fromString(e), sessionId)) .collect(Collectors.toList())) .block(TIMEOUT); } }
class UnnamedSessionManagerIntegrationTest extends IntegrationTestBase { private final AtomicInteger messagesPending = new AtomicInteger(); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; UnnamedSessionManagerIntegrationTest() { super(new ClientLogger(UnnamedSessionManagerIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { final int pending = messagesPending.get(); logger.info("Pending messages: {}", pending); } @ParameterizedTest @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase /** * Verifies that we can roll over to a next session. */ @Test void multipleSessions() { final int entityIndex = TestUtils.USE_CASE_MULTIPLE_SESSION; final String messageId = "singleUnnamedSession"; final String now = Instant.now().toString(); final List<String> sessionIds = IntStream.range(0, 3) .mapToObj(number -> String.join("-", String.valueOf(number), "singleUnnamedSession", now)) .collect(Collectors.toList()); logger.info("------ Session ids ------"); for (int i = 0; i < sessionIds.size(); i++) { logger.info("[{}]: {}", i, sessionIds.get(i)); } final String contents = "Some-contents"; final int numberToSend = 3; final int maxMessages = numberToSend * sessionIds.size(); final int maxConcurrency = 2; final Set<String> set = new HashSet<>(); setSenderAndReceiver(MessagingEntityType.SUBSCRIPTION, entityIndex, Duration.ofSeconds(20), builder -> builder.maxConcurrentSessions(maxConcurrency).maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(maxMessages) .flatMap(index -> { final int i = (int) (index % sessionIds.size()); final String id = sessionIds.get(i); final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(id); messagesPending.incrementAndGet(); return sender.send(message).thenReturn( String.format("sessionId[%s] 
sent[%s] Message sent.", id, index)); }).subscribe( message -> logger.info(message), error -> logger.error("Error encountered.", error), () -> logger.info("Finished sending.")); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); } } private void assertFromSession(List<String> sessionIds, Set<String> currentSessions, int maxSize, String messageId, String contents, ServiceBusReceivedMessageContext context) { logger.info("Verifying message: {}", context.getSessionId()); assertNotNull(context.getSessionId()); assertTrue(sessionIds.contains(context.getSessionId())); if (currentSessions.add(context.getSessionId())) { logger.info("Adding sessionId: {}", context.getSessionId()); } assertTrue(currentSessions.size() <= maxSize, String.format( "Current size (%s) is larger than max (%s).", currentSessions.size(), maxSize)); assertMessageEquals(null, messageId, contents, context); } /** * Sets the sender and receiver. 
If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, Duration operationTimeout, Function<ServiceBusSessionReceiverClientBuilder, ServiceBusSessionReceiverClientBuilder> onBuild) { this.sender = getSenderBuilder(false, entityType, entityIndex, true, false) .buildAsyncClient(); ServiceBusSessionReceiverClientBuilder sessionBuilder = getSessionReceiverBuilder(false, entityType, entityIndex, builder -> builder.retryOptions(new AmqpRetryOptions().setTryTimeout(operationTimeout)), false); this.receiver = onBuild.apply(sessionBuilder).buildAsyncClient(); } private static void assertMessageEquals(String sessionId, String messageId, String contents, ServiceBusReceivedMessageContext actual) { ServiceBusReceivedMessage message = actual.getMessage(); assertNotNull(message, "'message' should not be null. Error? " + actual.getThrowable()); if (!CoreUtils.isNullOrEmpty(sessionId)) { assertEquals(sessionId, message.getSessionId()); } assertEquals(messageId, message.getMessageId()); assertEquals(contents, new String(message.getBody(), StandardCharsets.UTF_8)); assertNull(actual.getThrowable()); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<String> lockTokens, String sessionId) { Mono.when(lockTokens.stream().map(e -> client.complete(MessageLockToken.fromString(e), sessionId)) .collect(Collectors.toList())) .block(TIMEOUT); return lockTokens.size(); } }
class UnnamedSessionManagerIntegrationTest extends IntegrationTestBase { private final AtomicInteger messagesPending = new AtomicInteger(); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; UnnamedSessionManagerIntegrationTest() { super(new ClientLogger(UnnamedSessionManagerIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { final int pending = messagesPending.get(); logger.info("Pending messages: {}", pending); } @ParameterizedTest @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase /** * Verifies that we can roll over to a next session. */ @Test void multipleSessions() { final int entityIndex = TestUtils.USE_CASE_MULTIPLE_SESSION; final String messageId = "singleUnnamedSession"; final String now = Instant.now().toString(); final List<String> sessionIds = IntStream.range(0, 3) .mapToObj(number -> String.join("-", String.valueOf(number), "singleUnnamedSession", now)) .collect(Collectors.toList()); logger.info("------ Session ids ------"); for (int i = 0; i < sessionIds.size(); i++) { logger.info("[{}]: {}", i, sessionIds.get(i)); } final String contents = "Some-contents"; final int numberToSend = 3; final int maxMessages = numberToSend * sessionIds.size(); final int maxConcurrency = 2; final Set<String> set = new HashSet<>(); setSenderAndReceiver(MessagingEntityType.SUBSCRIPTION, entityIndex, Duration.ofSeconds(20), builder -> builder.maxConcurrentSessions(maxConcurrency).maxAutoLockRenewalDuration(Duration.ofMinutes(2))); final Disposable subscription = Flux.interval(Duration.ofMillis(500)) .take(maxMessages) .flatMap(index -> { final int i = (int) (index % sessionIds.size()); final String id = sessionIds.get(i); final ServiceBusMessage message = getServiceBusMessage(contents, messageId) .setSessionId(id); messagesPending.incrementAndGet(); return sender.send(message).thenReturn( String.format("sessionId[%s] 
sent[%s] Message sent.", id, index)); }).subscribe( message -> logger.info(message), error -> logger.error("Error encountered.", error), () -> logger.info("Finished sending.")); try { StepVerifier.create(receiver.receive()) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .assertNext(context -> assertFromSession(sessionIds, set, maxConcurrency + 1, messageId, contents, context)) .thenCancel() .verify(Duration.ofMinutes(2)); } finally { subscription.dispose(); } } private void assertFromSession(List<String> sessionIds, Set<String> currentSessions, int maxSize, String messageId, String contents, ServiceBusReceivedMessageContext context) { logger.info("Verifying message: {}", context.getSessionId()); assertNotNull(context.getSessionId()); assertTrue(sessionIds.contains(context.getSessionId())); if (currentSessions.add(context.getSessionId())) { logger.info("Adding sessionId: {}", context.getSessionId()); } assertTrue(currentSessions.size() <= maxSize, String.format( "Current size (%s) is larger than max (%s).", currentSessions.size(), maxSize)); assertMessageEquals(null, messageId, contents, context); } /** * Sets the sender and receiver. 
If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, Duration operationTimeout, Function<ServiceBusSessionReceiverClientBuilder, ServiceBusSessionReceiverClientBuilder> onBuild) { this.sender = getSenderBuilder(false, entityType, entityIndex, true, false) .buildAsyncClient(); ServiceBusSessionReceiverClientBuilder sessionBuilder = getSessionReceiverBuilder(false, entityType, entityIndex, builder -> builder.retryOptions(new AmqpRetryOptions().setTryTimeout(operationTimeout)), false); this.receiver = onBuild.apply(sessionBuilder).buildAsyncClient(); } private static void assertMessageEquals(String sessionId, String messageId, String contents, ServiceBusReceivedMessageContext actual) { ServiceBusReceivedMessage message = actual.getMessage(); assertNotNull(message, "'message' should not be null. Error? " + actual.getThrowable()); if (!CoreUtils.isNullOrEmpty(sessionId)) { assertEquals(sessionId, message.getSessionId()); } assertEquals(messageId, message.getMessageId()); assertEquals(contents, new String(message.getBody(), StandardCharsets.UTF_8)); assertNull(actual.getThrowable()); } }
Since you are checking `instanceof` here, it's better to show that you are converting it into `Integer` type by casting it: `Integer quantity = (Integer) formField.getFieldValue()`. The `printf` statement with `%d` is too subtle to understand why you checked if this was an `instanceof` an Integer. Make the same changes in all places where you are checking for `instanceof`.
private static void getAccountProperties(FormTrainingClient formTrainingClient) { AccountProperties accountProperties = formTrainingClient.getAccountProperties(); System.out.printf("Max number of models that can be trained for this account: %s%n", accountProperties.getCustomModelLimit()); System.out.printf("Current count of trained custom models: %d%n", accountProperties.getCustomModelCount()); }
AccountProperties accountProperties = formTrainingClient.getAccountProperties();
private static void getAccountProperties(FormTrainingClient formTrainingClient) { AccountProperties accountProperties = formTrainingClient.getAccountProperties(); System.out.printf("Max number of models that can be trained for this account: %s%n", accountProperties.getCustomModelLimit()); System.out.printf("Current count of trained custom models: %d%n", accountProperties.getCustomModelCount()); }
class Authentication { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public static void main(String[] args) { /** * Set the environment variables with your own values before running the sample: * AZURE_CLIENT_ID - the client ID of your active directory application. * AZURE_TENANT_ID - the tenant ID of your active directory application. * AZURE_CLIENT_SECRET - the secret of your active directory application. */ authenticationWithKeyCredentialFormRecognizerClient(); authenticationWithAzureActiveDirectoryFormRecognizerClient(); authenticationWithKeyCredentialFormTrainingClient(); authenticationWithAzureActiveDirectoryFormTrainingClient(); } private static void authenticationWithKeyCredentialFormRecognizerClient() { FormRecognizerClient formRecognizerClient = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); beginRecognizeCustomFormsFromUrl(formRecognizerClient); } private static void authenticationWithAzureActiveDirectoryFormRecognizerClient() { FormRecognizerClient formRecognizerClient = new FormRecognizerClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("{endpoint}") .buildClient(); beginRecognizeCustomFormsFromUrl(formRecognizerClient); } private static void authenticationWithKeyCredentialFormTrainingClient() { FormTrainingClient formTrainingClient = new FormTrainingClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); getAccountProperties(formTrainingClient); } private static void authenticationWithAzureActiveDirectoryFormTrainingClient() { FormTrainingClient formTrainingClient = new FormTrainingClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("{endpoint}") .buildClient(); getAccountProperties(formTrainingClient); } 
@SuppressWarnings("unchecked") private static void beginRecognizeCustomFormsFromUrl(FormRecognizerClient formRecognizerClient) { String receiptUrl = "https: + "/azure-ai-formrecognizer/src/samples/java/sample-forms/receipts/contoso-allinone.jpg"; SyncPoller<OperationResult, List<RecognizedForm>> recognizeReceiptPoller = formRecognizerClient.beginRecognizeReceiptsFromUrl(receiptUrl); List<RecognizedForm> receiptPageResults = recognizeReceiptPoller.getFinalResult(); for (int i = 0; i < receiptPageResults.size(); i++) { RecognizedForm recognizedReceipt = receiptPageResults.get(i); Map<String, FormField<?>> recognizedFields = recognizedReceipt.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField<?> merchantNameField = recognizedFields.get("MerchantName"); Object merchantNameFieldValue = recognizedFields.get("MerchantName").getFieldValue(); if (merchantNameFieldValue instanceof String) { System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantNameFieldValue, merchantNameField.getConfidence()); } FormField<?> transactionDateField = recognizedFields.get("TransactionDate"); Object transactionDateFieldValue = recognizedFields.get("MerchantName").getFieldValue(); if (transactionDateFieldValue instanceof LocalDate) { LocalDate transactionDate = (LocalDate) transactionDateFieldValue; System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } FormField<?> receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (receiptItemsField.getFieldValue() instanceof List) { List<FormField<?>> receiptItems = (List<FormField<?>>) receiptItemsField.getFieldValue(); receiptItems.forEach(receiptItem -> { if (receiptItem.getFieldValue() instanceof Map) { ((Map<String, FormField<?>>) receiptItem.getFieldValue()).forEach((key, formField) -> { if ("Name".equals(key)) { if (formField.getFieldValue() instanceof 
String) { System.out.printf("Name: %s, confidence: %.2fs%n", formField.getFieldValue(), formField.getConfidence()); } } if ("Quantity".equals(key)) { if (formField.getFieldValue() instanceof Integer) { System.out.printf("Quantity: %d, confidence: %.2f%n", formField.getFieldValue(), formField.getConfidence()); } } }); } }); } } System.out.print("-----------------------------------"); } } }
class Authentication { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * */ public static void main(String[] args) { /** * Set the environment variables with your own values before running the sample: * AZURE_CLIENT_ID - the client ID of your active directory application. * AZURE_TENANT_ID - the tenant ID of your active directory application. * AZURE_CLIENT_SECRET - the secret of your active directory application. */ authenticationWithKeyCredentialFormRecognizerClient(); authenticationWithAzureActiveDirectoryFormRecognizerClient(); authenticationWithKeyCredentialFormTrainingClient(); authenticationWithAzureActiveDirectoryFormTrainingClient(); } private static void authenticationWithKeyCredentialFormRecognizerClient() { FormRecognizerClient formRecognizerClient = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); beginRecognizeCustomFormsFromUrl(formRecognizerClient); } private static void authenticationWithAzureActiveDirectoryFormRecognizerClient() { FormRecognizerClient formRecognizerClient = new FormRecognizerClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("{endpoint}") .buildClient(); beginRecognizeCustomFormsFromUrl(formRecognizerClient); } private static void authenticationWithKeyCredentialFormTrainingClient() { FormTrainingClient formTrainingClient = new FormTrainingClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); getAccountProperties(formTrainingClient); } private static void authenticationWithAzureActiveDirectoryFormTrainingClient() { FormTrainingClient formTrainingClient = new FormTrainingClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("{endpoint}") .buildClient(); getAccountProperties(formTrainingClient); } private static void beginRecognizeCustomFormsFromUrl(FormRecognizerClient formRecognizerClient) { String receiptUrl = 
"https: + "/azure-ai-formrecognizer/src/samples/java/sample-forms/receipts/contoso-allinone.jpg"; SyncPoller<OperationResult, List<RecognizedForm>> recognizeReceiptPoller = formRecognizerClient.beginRecognizeReceiptsFromUrl(receiptUrl); List<RecognizedForm> receiptPageResults = recognizeReceiptPoller.getFinalResult(); for (int i = 0; i < receiptPageResults.size(); i++) { RecognizedForm recognizedForm = receiptPageResults.get(i); Map<String, FormField<?>> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField<?> merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING.equals(merchantNameField.getValueType())) { String merchantName = FieldValueType.STRING.cast(merchantNameField); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField<?> merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER.equals(merchantNameField.getValueType())) { String merchantAddress = FieldValueType.PHONE_NUMBER.cast(merchantPhoneNumberField); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField<?> transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE.equals(transactionDateField.getValueType())) { LocalDate transactionDate = FieldValueType.DATE.cast(transactionDateField); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField<?> receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST.equals(receiptItemsField.getValueType())) { List<FormField<?>> receiptItems = 
FieldValueType.LIST.cast(receiptItemsField); receiptItems.forEach(receiptItem -> { if (FieldValueType.MAP.equals(receiptItem.getValueType())) { Map<String, FormField<?>> formFieldMap = FieldValueType.MAP.cast(receiptItem); formFieldMap.forEach((key, formField) -> { if ("Name".equals(key)) { if (FieldValueType.STRING.equals(formField.getValueType())) { String name = FieldValueType.STRING.cast(formField); System.out.printf("Name: %s, confidence: %.2fs%n", name, formField.getConfidence()); } } if ("Quantity".equals(key)) { if (FieldValueType.DOUBLE.equals(formField.getValueType())) { Float quantity = FieldValueType.DOUBLE.cast(formField); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } }); } }); } } System.out.print("-----------------------------------"); } } }
I feel like we need to either link to the API ref or somehow communicate the expected types for each receipt field. Sorry if you did this and I missed it
public static void main(final String[] args) { FormRecognizerClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildClient(); String receiptUrl = "https: + "/azure-ai-formrecognizer/src/samples/java/sample-forms/receipts/contoso-allinone.jpg"; SyncPoller<OperationResult, List<RecognizedForm>> recognizeReceiptPoller = client.beginRecognizeReceiptsFromUrl(receiptUrl); List<RecognizedForm> receiptPageResults = recognizeReceiptPoller.getFinalResult(); for (int i = 0; i < receiptPageResults.size(); i++) { RecognizedForm recognizedReceipt = receiptPageResults.get(i); Map<String, FormField<?>> recognizedFields = recognizedReceipt.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField<?> merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { Object merchantNameFieldValue = merchantNameField.getValue(); if (merchantNameFieldValue instanceof String) { String merchantName = (String) merchantNameFieldValue; System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField<?> merchantAddressField = recognizedFields.get("MerchantAddress"); if (merchantAddressField != null) { Object merchantAddressFieldValue = merchantAddressField.getValue(); if (merchantAddressFieldValue instanceof String) { String merchantAddress = (String) merchantAddressFieldValue; System.out.printf("Merchant Address: %s, confidence: %.2f%n", merchantAddress, merchantAddressField.getConfidence()); } } FormField<?> transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { Object transactionDateFieldValue = transactionDateField.getValue(); if (transactionDateFieldValue instanceof LocalDate) { LocalDate transactionDate = (LocalDate) transactionDateFieldValue; System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, 
transactionDateField.getConfidence()); } } FormField<?> receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (receiptItemsField.getValue() instanceof List) { List<FormField<?>> receiptItems = (List<FormField<?>>) receiptItemsField.getValue(); receiptItems.forEach(receiptItem -> { if (receiptItem.getValue() instanceof Map) { ((Map<String, FormField<?>>) receiptItem.getValue()).forEach((key, formField) -> { if ("Name".equals(key)) { if (formField.getValue() instanceof String) { String name = (String) formField.getValue(); System.out.printf("Name: %s, confidence: %.2fs%n", name, formField.getConfidence()); } } if ("Quantity".equals(key)) { if (formField.getValue() instanceof Integer) { Integer quantity = (Integer) formField.getValue(); System.out.printf("Quantity: %d, confidence: %.2f%n", quantity, formField.getConfidence()); } } if ("Price".equals(key)) { if (formField.getValue() instanceof Float) { Float price = (Float) formField.getValue(); System.out.printf("Price: %f, confidence: %.2f%n", price, formField.getConfidence()); } } if ("TotalPrice".equals(key)) { if (formField.getValue() instanceof Float) { Float totalPrice = (Float) formField.getValue(); System.out.printf("Total Price: %f, confidence: %.2f%n", totalPrice, formField.getConfidence()); } } }); } }); } } System.out.print("-----------------------------------"); } }
if (merchantNameFieldValue instanceof String) {
public static void main(final String[] args) { FormRecognizerClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildClient(); String receiptUrl = "https: + "/azure-ai-formrecognizer/src/samples/java/sample-forms/receipts/contoso-allinone.jpg"; SyncPoller<OperationResult, List<RecognizedForm>> recognizeReceiptPoller = client.beginRecognizeReceiptsFromUrl(receiptUrl); List<RecognizedForm> receiptPageResults = recognizeReceiptPoller.getFinalResult(); for (int i = 0; i < receiptPageResults.size(); i++) { RecognizedForm recognizedForm = receiptPageResults.get(i); Map<String, FormField<?>> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField<?> merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING.equals(merchantNameField.getValueType())) { String merchantName = FieldValueType.STRING.cast(merchantNameField); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField<?> merchantAddressField = recognizedFields.get("MerchantAddress"); if (merchantAddressField != null) { if (FieldValueType.STRING.equals(merchantNameField.getValueType())) { String merchantAddress = FieldValueType.STRING.cast(merchantAddressField); System.out.printf("Merchant Address: %s, confidence: %.2f%n", merchantAddress, merchantAddressField.getConfidence()); } } FormField<?> merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER.equals(merchantNameField.getValueType())) { String merchantAddress = FieldValueType.PHONE_NUMBER.cast(merchantPhoneNumberField); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField<?> transactionDateField = 
recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE.equals(transactionDateField.getValueType())) { LocalDate transactionDate = FieldValueType.DATE.cast(transactionDateField); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField<?> receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST.equals(receiptItemsField.getValueType())) { List<FormField<?>> receiptItems = FieldValueType.LIST.cast(receiptItemsField); receiptItems.forEach(receiptItem -> { if (FieldValueType.MAP.equals(receiptItem.getValueType())) { Map<String, FormField<?>> formFieldMap = FieldValueType.MAP.cast(receiptItem); formFieldMap.forEach((key, formField) -> { if ("Name".equals(key)) { if (FieldValueType.STRING.equals(formField.getValueType())) { String name = FieldValueType.STRING.cast(formField); System.out.printf("Name: %s, confidence: %.2fs%n", name, formField.getConfidence()); } } if ("Quantity".equals(key)) { if (FieldValueType.DOUBLE.equals(formField.getValueType())) { Float quantity = FieldValueType.DOUBLE.cast(formField); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } if ("Price".equals(key)) { if (FieldValueType.DOUBLE.equals(formField.getValueType())) { Float price = FieldValueType.DOUBLE.cast(formField); System.out.printf("Price: %f, confidence: %.2f%n", price, formField.getConfidence()); } } if ("TotalPrice".equals(key)) { if (FieldValueType.DOUBLE.equals(formField.getValueType())) { Float totalPrice = FieldValueType.DOUBLE.cast(formField); System.out.printf("Total Price: %f, confidence: %.2f%n", totalPrice, formField.getConfidence()); } } }); } }); } } System.out.print("-----------------------------------"); } }
class RecognizeReceiptsFromUrl { /** * Main method to invoke this demo. * * @param args Unused. Arguments to the program. */ @SuppressWarnings("unchecked") }
class RecognizeReceiptsFromUrl { /** * Main method to invoke this demo. * * @param args Unused. Arguments to the program. */ }
added :)
public static void main(final String[] args) { FormRecognizerClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildClient(); String receiptUrl = "https: + "/azure-ai-formrecognizer/src/samples/java/sample-forms/receipts/contoso-allinone.jpg"; SyncPoller<OperationResult, List<RecognizedForm>> recognizeReceiptPoller = client.beginRecognizeReceiptsFromUrl(receiptUrl); List<RecognizedForm> receiptPageResults = recognizeReceiptPoller.getFinalResult(); for (int i = 0; i < receiptPageResults.size(); i++) { RecognizedForm recognizedReceipt = receiptPageResults.get(i); Map<String, FormField<?>> recognizedFields = recognizedReceipt.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField<?> merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { Object merchantNameFieldValue = merchantNameField.getValue(); if (merchantNameFieldValue instanceof String) { String merchantName = (String) merchantNameFieldValue; System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField<?> merchantAddressField = recognizedFields.get("MerchantAddress"); if (merchantAddressField != null) { Object merchantAddressFieldValue = merchantAddressField.getValue(); if (merchantAddressFieldValue instanceof String) { String merchantAddress = (String) merchantAddressFieldValue; System.out.printf("Merchant Address: %s, confidence: %.2f%n", merchantAddress, merchantAddressField.getConfidence()); } } FormField<?> transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { Object transactionDateFieldValue = transactionDateField.getValue(); if (transactionDateFieldValue instanceof LocalDate) { LocalDate transactionDate = (LocalDate) transactionDateFieldValue; System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, 
transactionDateField.getConfidence()); } } FormField<?> receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (receiptItemsField.getValue() instanceof List) { List<FormField<?>> receiptItems = (List<FormField<?>>) receiptItemsField.getValue(); receiptItems.forEach(receiptItem -> { if (receiptItem.getValue() instanceof Map) { ((Map<String, FormField<?>>) receiptItem.getValue()).forEach((key, formField) -> { if ("Name".equals(key)) { if (formField.getValue() instanceof String) { String name = (String) formField.getValue(); System.out.printf("Name: %s, confidence: %.2fs%n", name, formField.getConfidence()); } } if ("Quantity".equals(key)) { if (formField.getValue() instanceof Integer) { Integer quantity = (Integer) formField.getValue(); System.out.printf("Quantity: %d, confidence: %.2f%n", quantity, formField.getConfidence()); } } if ("Price".equals(key)) { if (formField.getValue() instanceof Float) { Float price = (Float) formField.getValue(); System.out.printf("Price: %f, confidence: %.2f%n", price, formField.getConfidence()); } } if ("TotalPrice".equals(key)) { if (formField.getValue() instanceof Float) { Float totalPrice = (Float) formField.getValue(); System.out.printf("Total Price: %f, confidence: %.2f%n", totalPrice, formField.getConfidence()); } } }); } }); } } System.out.print("-----------------------------------"); } }
if (merchantNameFieldValue instanceof String) {
public static void main(final String[] args) { FormRecognizerClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildClient(); String receiptUrl = "https: + "/azure-ai-formrecognizer/src/samples/java/sample-forms/receipts/contoso-allinone.jpg"; SyncPoller<OperationResult, List<RecognizedForm>> recognizeReceiptPoller = client.beginRecognizeReceiptsFromUrl(receiptUrl); List<RecognizedForm> receiptPageResults = recognizeReceiptPoller.getFinalResult(); for (int i = 0; i < receiptPageResults.size(); i++) { RecognizedForm recognizedForm = receiptPageResults.get(i); Map<String, FormField<?>> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField<?> merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING.equals(merchantNameField.getValueType())) { String merchantName = FieldValueType.STRING.cast(merchantNameField); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField<?> merchantAddressField = recognizedFields.get("MerchantAddress"); if (merchantAddressField != null) { if (FieldValueType.STRING.equals(merchantNameField.getValueType())) { String merchantAddress = FieldValueType.STRING.cast(merchantAddressField); System.out.printf("Merchant Address: %s, confidence: %.2f%n", merchantAddress, merchantAddressField.getConfidence()); } } FormField<?> merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER.equals(merchantNameField.getValueType())) { String merchantAddress = FieldValueType.PHONE_NUMBER.cast(merchantPhoneNumberField); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField<?> transactionDateField = 
recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE.equals(transactionDateField.getValueType())) { LocalDate transactionDate = FieldValueType.DATE.cast(transactionDateField); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField<?> receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST.equals(receiptItemsField.getValueType())) { List<FormField<?>> receiptItems = FieldValueType.LIST.cast(receiptItemsField); receiptItems.forEach(receiptItem -> { if (FieldValueType.MAP.equals(receiptItem.getValueType())) { Map<String, FormField<?>> formFieldMap = FieldValueType.MAP.cast(receiptItem); formFieldMap.forEach((key, formField) -> { if ("Name".equals(key)) { if (FieldValueType.STRING.equals(formField.getValueType())) { String name = FieldValueType.STRING.cast(formField); System.out.printf("Name: %s, confidence: %.2fs%n", name, formField.getConfidence()); } } if ("Quantity".equals(key)) { if (FieldValueType.DOUBLE.equals(formField.getValueType())) { Float quantity = FieldValueType.DOUBLE.cast(formField); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } if ("Price".equals(key)) { if (FieldValueType.DOUBLE.equals(formField.getValueType())) { Float price = FieldValueType.DOUBLE.cast(formField); System.out.printf("Price: %f, confidence: %.2f%n", price, formField.getConfidence()); } } if ("TotalPrice".equals(key)) { if (FieldValueType.DOUBLE.equals(formField.getValueType())) { Float totalPrice = FieldValueType.DOUBLE.cast(formField); System.out.printf("Total Price: %f, confidence: %.2f%n", totalPrice, formField.getConfidence()); } } }); } }); } } System.out.print("-----------------------------------"); } }
class RecognizeReceiptsFromUrl { /** * Main method to invoke this demo. * * @param args Unused. Arguments to the program. */ @SuppressWarnings("unchecked") }
class RecognizeReceiptsFromUrl { /** * Main method to invoke this demo. * * @param args Unused. Arguments to the program. */ }
Update this to using the public beginRecognizeContent API
public void recognizeContentResultWithBlankPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(getReplayableBufferData(BLANK_FORM_LOCAL_URL), BLANK_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, null).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }
client.beginRecognizeContent(getReplayableBufferData(BLANK_FORM_LOCAL_URL), BLANK_FORM_FILE_LENGTH,
public void recognizeContentResultWithBlankPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent( new RecognizeOptions(toFluxByteBuffer(data), dataLength) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); }
class FormRecognizerAsyncClientTest extends FormRecognizerClientTestBase { private FormRecognizerAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private FormRecognizerAsyncClient getFormRecognizerAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormRecognizerClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } private FormTrainingAsyncClient getFormTrainingAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormTrainingClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verifies receipt data from a document using file data as source. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunner((data) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataTextDetailsWithNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); assertThrows(RuntimeException.class, () -> client.beginRecognizeReceipts(null, RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG, false, null).getSyncPoller()); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(getReplayableBufferData(RECEIPT_LOCAL_URL), RECEIPT_FILE_LENGTH, null, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); } /** * Verifies receipt data from a document using file data as source and including text content details. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataTextDetails(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunnerTextDetails((data, includeTextDetails) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG, includeTextDetails, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), true); }); } /** * Verifies receipt data from a document using PNG file data as source and including text content details. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithPngFile(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptPngDataRunnerTextDetails((data, includeTextDetails) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), RECEIPT_PNG_FILE_LENGTH, FormContentType.IMAGE_PNG, includeTextDetails, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), true); }); } /** * Verifies receipt data from a document using blank PDF. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithBlankPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner(data -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), BLANK_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateBlankPdfResultData(syncPoller.getFinalResult()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateMultipageReceiptData(syncPoller.getFinalResult()); }); } /** * Verifies receipt data for a document using source as file url. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies that an exception is thrown for invalid source url. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((sourceUrl) -> assertThrows(ErrorResponseException.class, () -> client.beginRecognizeReceiptsFromUrl(sourceUrl).getSyncPoller())); } /** * Verifies receipt data for a document using source as file url and include content when includeTextDetails is * true. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrlTextDetails(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunnerTextDetails((sourceUrl, includeTextDetails) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(sourceUrl, includeTextDetails, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextDetails); }); } /** * Verifies receipt data for a document using source as PNG file url and include content when includeTextDetails is * true. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrlWithPngFile(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptPngSourceUrlRunnerTextDetails((sourceUrl, includeTextDetails) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(sourceUrl, includeTextDetails, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextDetails); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(fileUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateMultipageReceiptData(syncPoller.getFinalResult()); }); } /** * Verifies layout data for a document using source as input stream data. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(toFluxByteBuffer(data), LAYOUT_FILE_LENGTH, FormContentType.IMAGE_JPEG, null).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { assertThrows(RuntimeException.class, () -> client.beginRecognizeContent(null, LAYOUT_FILE_LENGTH, FormContentType.IMAGE_JPEG, null).getSyncPoller()); } /** * Verifies content type will be auto detected when using content/layout API with input stream data overload. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(getReplayableBufferData(LAYOUT_LOCAL_URL), LAYOUT_FILE_LENGTH, null, null).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies blank form file is still a valid file to process */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils /** * Verifies throwing exception when using bad content type argument */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeBadContentTypeArgument(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(toFluxByteBuffer(data), LAYOUT_FILE_LENGTH, FormContentType.fromString("application/jpeg"), null).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } 
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(toFluxByteBuffer(data), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies layout data for a document using source as input stream data. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies layout data for a pdf url */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlWithPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); pdfContentFromUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies that an exception is thrown for invalid status model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> assertThrows(ErrorResponseException.class, () -> client.beginRecognizeContentFromUrl(invalidSourceUrl).getSyncPoller())); } /** * Verifies layout data for a blank pdf url */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlWithEncodedBlankSpace(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner((fileUrl) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(fileUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, true, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * Verifies custom form data for a JPG content type with labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithJpgContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormJpgDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), FORM_1_JPG_FILE_LENGTH, FormContentType.IMAGE_JPEG, false, null).getSyncPoller(); syncPoller.waitForCompletion(); 
validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies custom form data for a blank PDF content type with labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithBlankPdfContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), BLANK_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id, * excluding text content. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataExcludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies an exception thrown for a document using null data value. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithNullFormData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms(null, syncPoller.getFinalResult().getModelId(), CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, true, null).getSyncPoller()); })); } /** * Verifies an exception thrown for a document using null model id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithNullModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms(toFluxByteBuffer(data), null, CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, true, null) .getSyncPoller()); assertEquals(EXPECTED_MODEL_ID_IS_REQUIRED_EXCEPTION_MESSAGE, ex.getMessage()); })); } /** * Verifies an exception thrown for an empty model id when recognizing custom form from URL. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithEmptyModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms(toFluxByteBuffer(data), "", CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, true, null).getSyncPoller()); assertEquals(EXPECTED_INVALID_UUID_EXCEPTION_MESSAGE, ex.getMessage()); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormInvalidStatus(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> beginTrainingLabeledRunner((training, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(training, useTrainingLabels) .getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); FormRecognizerException formRecognizerException = assertThrows(FormRecognizerException.class, () -> client.beginRecognizeCustomFormsFromUrl(invalidSourceUrl, createdModel.getModelId()) .getSyncPoller().getFinalResult()); ErrorInformation errorInformation = formRecognizerException.getErrorInformation().get(0); })); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(getReplayableBufferData(FORM_LOCAL_URL), trainingPoller.getFinalResult().getModelId(), CUSTOM_FORM_FILE_LENGTH, null, true, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * Verify custom form for a data stream of multi-page labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormMultiPageLabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion) .beginTraining(trainingFilesUrl, true).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataLabeled(syncPoller.getFinalResult()); })); } /** * 
Verifies custom form data for a document using source as input stream data and valid labeled model Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for a document using source as input stream data and valid include text content */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledDataIncludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), 
CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, true, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, false); })); } /** * Verify custom form for a data stream of multi-page unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormMultiPageUnlabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion) .beginTraining(trainingFilesUrl, false).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataUnlabeled(syncPoller.getFinalResult()); })); } /** * Verifies custom form data for a JPG content type with unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledDataWithJpgContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormJpgDataRunner(data -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = 
client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), FORM_1_JPG_FILE_LENGTH, FormContentType.IMAGE_JPEG, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for a blank PDF content type with unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledDataWithBlankPdfContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner(data -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), BLANK_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for an URL document data without labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlUnlabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlDataRunner(fileUrl -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, 
useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId(), false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); }), FORM_JPG); } /** * Verifies custom form data for an URL document data without labeled data and include text content */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlUnlabeledDataIncludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlDataRunner(fileUrl -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId(), true, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, false); }), FORM_JPG); } /** * Verify custom form for an URL of multi-page unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlMultiPageUnlabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion) 
.beginTraining(trainingFilesUrl, false).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId()) .getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataUnlabeled(syncPoller.getFinalResult()); })); } /** * Verifies that an exception is thrown for invalid status model Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); StepVerifier.create(client.beginRecognizeCustomFormsFromUrl(INVALID_URL, createdModel.getModelId())) .verifyErrorSatisfies(throwable -> assertEquals(throwable.getMessage(), INVALID_SOURCE_URL_ERROR)); }); } /** * Verifies an exception thrown for a null model id when recognizing custom form from URL. 
*/
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    // NOTE(review): the @MethodSource value was truncated in this copy; reconstructed to the
    // parameter factory used throughout this suite — confirm against version control.
    @MethodSource("com.azure.ai.formrecognizer.TestUtils#getTestParameters")
    public void recognizeCustomFormFromUrlLabeledDataWithNullModelId(HttpClient httpClient,
        FormRecognizerServiceVersion serviceVersion) {
        client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
        multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> {
            SyncPoller<OperationResult, CustomFormModel> trainingPoller =
                getFormTrainingAsyncClient(httpClient, serviceVersion)
                    .beginTraining(trainingFilesUrl, true).getSyncPoller();
            trainingPoller.waitForCompletion();

            // A null model id must be rejected client-side, before any service call is made.
            Exception ex = assertThrows(RuntimeException.class,
                () -> client.beginRecognizeCustomFormsFromUrl(fileUrl, null).getSyncPoller());
            assertEquals(EXPECTED_MODEL_ID_IS_REQUIRED_EXCEPTION_MESSAGE, ex.getMessage());
        }));
    }

    /**
     * Verifies an exception thrown for an empty model id for recognizing custom forms from URL.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.formrecognizer.TestUtils#getTestParameters")
    public void recognizeCustomFormFromUrlLabeledDataWithEmptyModelId(HttpClient httpClient,
        FormRecognizerServiceVersion serviceVersion) {
        client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
        multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> {
            SyncPoller<OperationResult, CustomFormModel> trainingPoller =
                getFormTrainingAsyncClient(httpClient, serviceVersion)
                    .beginTraining(trainingFilesUrl, true).getSyncPoller();
            trainingPoller.waitForCompletion();

            // An empty string is not a valid UUID, so the client must fail fast.
            Exception ex = assertThrows(RuntimeException.class,
                () -> client.beginRecognizeCustomFormsFromUrl(fileUrl, "").getSyncPoller());
            assertEquals(EXPECTED_INVALID_UUID_EXCEPTION_MESSAGE, ex.getMessage());
        }));
    }

    /**
     * Verifies custom form data for an URL document data with labeled data.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.formrecognizer.TestUtils#getTestParameters")
    public void recognizeCustomFormUrlLabeledData(HttpClient httpClient,
        FormRecognizerServiceVersion serviceVersion) {
        client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
        urlDataRunner(fileUrl -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> {
            SyncPoller<OperationResult, CustomFormModel> trainingPoller =
                getFormTrainingAsyncClient(httpClient, serviceVersion)
                    .beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller();
            trainingPoller.waitForCompletion();

            SyncPoller<OperationResult, List<RecognizedForm>> syncPoller =
                client.beginRecognizeCustomFormsFromUrl(fileUrl,
                    trainingPoller.getFinalResult().getModelId(), false, null).getSyncPoller();
            syncPoller.waitForCompletion();
            validateRecognizedResult(syncPoller.getFinalResult(), false, true);
        }), FORM_JPG);
    }

    /**
     * Verifies custom form data for an URL document data with labeled data and include text content.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.formrecognizer.TestUtils#getTestParameters")
    public void recognizeCustomFormUrlLabeledDataIncludeTextContent(HttpClient httpClient,
        FormRecognizerServiceVersion serviceVersion) {
        client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
        urlDataRunner(fileUrl -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> {
            SyncPoller<OperationResult, CustomFormModel> trainingPoller =
                getFormTrainingAsyncClient(httpClient, serviceVersion)
                    .beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller();
            trainingPoller.waitForCompletion();

            SyncPoller<OperationResult, List<RecognizedForm>> syncPoller =
                client.beginRecognizeCustomFormsFromUrl(fileUrl,
                    trainingPoller.getFinalResult().getModelId(), true, null).getSyncPoller();
            syncPoller.waitForCompletion();
            validateRecognizedResult(syncPoller.getFinalResult(), true, true);
        }), FORM_JPG);
    }

    /**
     * Verify custom form for an URL of multi-page labeled data.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.formrecognizer.TestUtils#getTestParameters")
    public void recognizeCustomFormUrlMultiPageLabeled(HttpClient httpClient,
        FormRecognizerServiceVersion serviceVersion) {
        client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
        multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> {
            SyncPoller<OperationResult, CustomFormModel> trainingPoller =
                getFormTrainingAsyncClient(httpClient, serviceVersion)
                    .beginTraining(trainingFilesUrl, true).getSyncPoller();
            trainingPoller.waitForCompletion();

            SyncPoller<OperationResult, List<RecognizedForm>> syncPoller =
                client.beginRecognizeCustomFormsFromUrl(fileUrl,
                    trainingPoller.getFinalResult().getModelId()).getSyncPoller();
            syncPoller.waitForCompletion();
            validateMultiPageDataLabeled(syncPoller.getFinalResult());
        }));
    }
}
class FormRecognizerAsyncClientTest extends FormRecognizerClientTestBase { private FormRecognizerAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private FormRecognizerAsyncClient getFormRecognizerAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormRecognizerClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } private FormTrainingAsyncClient getFormTrainingAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormTrainingClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verifies receipt data from a document using file data as source. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), dataLength, FormContentType.IMAGE_JPEG) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); assertThrows(RuntimeException.class, () -> client.beginRecognizeReceipts(null, RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG).getSyncPoller()); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts( new RecognizeOptions(getReplayableBufferData(RECEIPT_LOCAL_URL), RECEIPT_FILE_LENGTH) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); } /** * Verifies receipt data from a document using file data as source and including text content details. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataTextDetails(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunnerTextDetails((data, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts( new RecognizeOptions(toFluxByteBuffer(data), RECEIPT_FILE_LENGTH) .setFormContentType(FormContentType.IMAGE_JPEG).setIncludeTextContent(includeTextContent) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextContent); }); } /** * Verifies receipt data from a document using PNG file data as source and including text content details. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithPngFile(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptPngDataRunnerTextDetails((data, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(new RecognizeOptions(toFluxByteBuffer(data), RECEIPT_PNG_FILE_LENGTH) .setFormContentType(FormContentType.IMAGE_PNG).setIncludeTextContent(includeTextContent) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), true); }); } /** * Verifies receipt data from a document using blank PDF. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithBlankPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(new RecognizeOptions(toFluxByteBuffer(data), dataLength) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateBlankPdfResultData(syncPoller.getFinalResult()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts( new RecognizeOptions(toFluxByteBuffer(data), dataLength) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateMultipageReceiptData(syncPoller.getFinalResult()); }); } /** * Verifies receipt data for a document using source as file url. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies that an exception is thrown for invalid source url. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> { HttpResponseException errorResponseException = assertThrows(HttpResponseException.class, () -> client.beginRecognizeReceiptsFromUrl(new RecognizeOptions(invalidSourceUrl) .setPollInterval(durationTestMode)).getSyncPoller().getFinalResult()); ErrorInformation errorInformation = (ErrorInformation) errorResponseException.getValue(); assertEquals(INVALID_IMAGE_URL_ERROR_CODE, errorInformation.getCode()); assertEquals(IMAGE_URL_IS_BADLY_FORMATTED_ERROR_MESSAGE, errorInformation.getMessage()); }); } /** * Verifies receipt data for a document using source as file url and include content when includeTextContent is * true. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromUrlTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunnerTextDetails((sourceUrl, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl( new RecognizeOptions(sourceUrl).setIncludeTextContent(includeTextContent) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextContent); }); } /** * Verifies receipt data for a document using source as PNG file url and include content when includeTextContent is * true. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrlWithPngFile(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptPngSourceUrlRunnerTextDetails((sourceUrl, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl( new RecognizeOptions(sourceUrl).setIncludeTextContent(includeTextContent) .setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextContent); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts( new 
RecognizeOptions(fileUrl).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateMultipageReceiptData(syncPoller.getFinalResult()); }); } /** * Verifies layout data for a document using source as input stream data. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent( toFluxByteBuffer(data), dataLength, FormContentType.IMAGE_JPEG).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { assertThrows(RuntimeException.class, () -> client.beginRecognizeContent(null, LAYOUT_FILE_LENGTH, FormContentType.IMAGE_JPEG).getSyncPoller()); } /** * Verifies content type will be auto detected when using content/layout API with input stream data overload. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent( new RecognizeOptions(getReplayableBufferData(LAYOUT_LOCAL_URL), dataLength) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies blank form file is still a valid file to process */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent( new RecognizeOptions(toFluxByteBuffer(data), dataLength) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies layout data for a document using source as input stream data. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies layout data for a pdf url */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlWithPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); pdfContentFromUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(new RecognizeOptions(sourceUrl).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies that an exception is thrown for invalid status model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> { HttpResponseException errorResponseException = assertThrows(HttpResponseException.class, () -> client.beginRecognizeContent(new RecognizeOptions(invalidSourceUrl) .setPollInterval(durationTestMode)).getSyncPoller().getFinalResult()); ErrorInformation errorInformation = (ErrorInformation) errorResponseException.getValue(); assertEquals(INVALID_IMAGE_URL_ERROR_CODE, errorInformation.getCode()); assertEquals(IMAGE_URL_IS_BADLY_FORMATTED_ERROR_MESSAGE, errorInformation.getMessage()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner((fileUrl) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent( new RecognizeOptions(fileUrl).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * Verifies custom form data for a JPG content type with labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithJpgContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormJpgDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), 
dataLength, trainingPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.IMAGE_JPEG) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies custom form data for a blank PDF content type with labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithBlankPdfContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.APPLICATION_PDF) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id, * excluding text content. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataExcludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.APPLICATION_PDF) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies an exception thrown for a document using null data value. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithNullFormData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); syncPoller.waitForCompletion(); assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions((InputStream) null, dataLength, syncPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(true).setPollInterval(durationTestMode)).getSyncPoller()); })); } /** * Verifies an exception thrown for a document using null model id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithNullModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> { Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, null).setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(durationTestMode)).getSyncPoller()); assertEquals(EXPECTED_MODEL_ID_IS_REQUIRED_EXCEPTION_MESSAGE, ex.getMessage()); }); } /** * Verifies an exception thrown for an empty model id when recognizing custom form from URL. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithEmptyModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> { Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, "") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(durationTestMode)).getSyncPoller()); assertEquals(EXPECTED_INVALID_UUID_EXCEPTION_MESSAGE, ex.getMessage()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormInvalidStatus(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> beginTrainingLabeledRunner((training, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(training, useTrainingLabels, null, durationTestMode).getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); FormRecognizerException formRecognizerException = assertThrows(FormRecognizerException.class, () -> client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( invalidSourceUrl, createdModel.getModelId()).setPollInterval(durationTestMode)) .getSyncPoller().getFinalResult()); ErrorInformation errorInformation = formRecognizerException.getErrorInformation().get(0); })); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(getReplayableBufferData(FORM_LOCAL_URL), dataLength, trainingPoller.getFinalResult().getModelId()) .setIncludeTextContent(true).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * Verify custom form for a data stream of multi-page labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormMultiPageLabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner((data, dataLength) -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, true, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) 
.setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataLabeled(syncPoller.getFinalResult()); })); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for a document using source as input stream data and valid include text content */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledDataIncludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = 
getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) .setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(true).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, false); }) ); } /** * Verify custom form for a data stream of multi-page unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormMultiPageUnlabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner((data, dataLength) -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion) .beginTraining(trainingFilesUrl, false, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataUnlabeled(syncPoller.getFinalResult()); }) ); } /** * Verifies custom form data for a JPG content type with unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void 
recognizeCustomFormUnlabeledDataWithJpgContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormJpgDataRunner((data, dataLength) -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.IMAGE_JPEG) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for a blank PDF content type with unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledDataWithBlankPdfContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner((data, dataLength) -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) 
.setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for an URL document data without labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlUnlabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlRunner(fileUrl -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); }), FORM_JPG); } /** * Verifies custom form data for an URL document data without labeled data and include text content */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlUnlabeledDataIncludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlRunner(fileUrl -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); 
trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setIncludeTextContent(true).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, false); }), FORM_JPG); } /** * Verify custom form for an URL of multi-page unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlMultiPageUnlabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, false, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataUnlabeled(syncPoller.getFinalResult()); })); } /** * Verifies that an exception is thrown for invalid status model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); StepVerifier.create(client.beginRecognizeCustomFormsFromUrl(INVALID_URL, createdModel.getModelId())) .verifyErrorSatisfies(throwable -> assertEquals(throwable.getMessage(), INVALID_SOURCE_URL_ERROR)); }); } /** * Verifies an exception thrown for a null model id when recognizing custom form from URL. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormFromUrlLabeledDataWithNullModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> { Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(fileUrl, null) .setPollInterval(durationTestMode)).getSyncPoller()); assertEquals(EXPECTED_MODEL_ID_IS_REQUIRED_EXCEPTION_MESSAGE, ex.getMessage()); }); } /** * Verifies an exception thrown for an empty model id for recognizing custom forms from URL. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormFromUrlLabeledDataWithEmptyModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> { Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(fileUrl, "") .setPollInterval(durationTestMode)).getSyncPoller()); assertEquals(EXPECTED_INVALID_UUID_EXCEPTION_MESSAGE, ex.getMessage()); }); } /** * Verifies custom form data for an URL document data with labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlLabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlRunner(fileUrl -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); }), FORM_JPG); } /** * Verifies custom form data for an URL document data with labeled data and include text content */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlLabeledDataIncludeTextContent(HttpClient httpClient, 
FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlRunner(fileUrl -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setIncludeTextContent(true).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); }), FORM_JPG); } /** * Verify custom form for an URL of multi-page labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlMultiPageLabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion) .beginTraining(trainingFilesUrl, true, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataLabeled(syncPoller.getFinalResult()); })); } }
Not sure what this comment means — ``` client.beginRecognizeContent() ``` is already a public API.
/**
 * Verifies that a blank PDF document is still accepted as valid input by the
 * content-recognition API and produces a well-formed result.
 */
public void recognizeContentResultWithBlankPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) {
    client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
    // Start the long-running content-recognition operation against the blank-PDF fixture.
    SyncPoller<OperationResult, List<FormPage>> poller = client
        .beginRecognizeContent(getReplayableBufferData(BLANK_FORM_LOCAL_URL), BLANK_FORM_FILE_LENGTH,
            FormContentType.APPLICATION_PDF, null)
        .getSyncPoller();
    poller.waitForCompletion();
    // A blank page should still yield a structurally valid (empty) content result.
    validateContentResultData(poller.getFinalResult(), false);
}
client.beginRecognizeContent(getReplayableBufferData(BLANK_FORM_LOCAL_URL), BLANK_FORM_FILE_LENGTH,
/**
 * Verifies that a blank PDF document is still accepted as valid input by the
 * content-recognition API when supplied as stream data via {@code RecognizeOptions}.
 */
public void recognizeContentResultWithBlankPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) {
    client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
    blankPdfDataRunner((data, dataLength) -> {
        // Build the recognition request from the raw stream, declaring the content type
        // explicitly and using the test-mode poll interval to keep playback fast.
        RecognizeOptions options = new RecognizeOptions(toFluxByteBuffer(data), dataLength)
            .setFormContentType(FormContentType.APPLICATION_PDF)
            .setPollInterval(durationTestMode);
        SyncPoller<OperationResult, List<FormPage>> poller = client.beginRecognizeContent(options).getSyncPoller();
        poller.waitForCompletion();
        // A blank page should still yield a structurally valid (empty) content result.
        validateContentResultData(poller.getFinalResult(), false);
    });
}
class FormRecognizerAsyncClientTest extends FormRecognizerClientTestBase { private FormRecognizerAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private FormRecognizerAsyncClient getFormRecognizerAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormRecognizerClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } private FormTrainingAsyncClient getFormTrainingAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormTrainingClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verifies receipt data from a document using file data as source. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunner((data) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataTextDetailsWithNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); assertThrows(RuntimeException.class, () -> client.beginRecognizeReceipts(null, RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG, false, null).getSyncPoller()); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(getReplayableBufferData(RECEIPT_LOCAL_URL), RECEIPT_FILE_LENGTH, null, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); } /** * Verifies receipt data from a document using file data as source and including text content details. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataTextDetails(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunnerTextDetails((data, includeTextDetails) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG, includeTextDetails, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), true); }); } /** * Verifies receipt data from a document using PNG file data as source and including text content details. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithPngFile(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptPngDataRunnerTextDetails((data, includeTextDetails) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), RECEIPT_PNG_FILE_LENGTH, FormContentType.IMAGE_PNG, includeTextDetails, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), true); }); } /** * Verifies receipt data from a document using blank PDF. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithBlankPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner(data -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), BLANK_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateBlankPdfResultData(syncPoller.getFinalResult()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateMultipageReceiptData(syncPoller.getFinalResult()); }); } /** * Verifies receipt data for a document using source as file url. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies that an exception is thrown for invalid source url. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((sourceUrl) -> assertThrows(ErrorResponseException.class, () -> client.beginRecognizeReceiptsFromUrl(sourceUrl).getSyncPoller())); } /** * Verifies receipt data for a document using source as file url and include content when includeTextDetails is * true. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrlTextDetails(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunnerTextDetails((sourceUrl, includeTextDetails) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(sourceUrl, includeTextDetails, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextDetails); }); } /** * Verifies receipt data for a document using source as PNG file url and include content when includeTextDetails is * true. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrlWithPngFile(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptPngSourceUrlRunnerTextDetails((sourceUrl, includeTextDetails) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(sourceUrl, includeTextDetails, null).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextDetails); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(fileUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateMultipageReceiptData(syncPoller.getFinalResult()); }); } /** * Verifies layout data for a document using source as input stream data. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(toFluxByteBuffer(data), LAYOUT_FILE_LENGTH, FormContentType.IMAGE_JPEG, null).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { assertThrows(RuntimeException.class, () -> client.beginRecognizeContent(null, LAYOUT_FILE_LENGTH, FormContentType.IMAGE_JPEG, null).getSyncPoller()); } /** * Verifies content type will be auto detected when using content/layout API with input stream data overload. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(getReplayableBufferData(LAYOUT_LOCAL_URL), LAYOUT_FILE_LENGTH, null, null).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies blank form file is still a valid file to process */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils /** * Verifies throwing exception when using bad content type argument */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeBadContentTypeArgument(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(toFluxByteBuffer(data), LAYOUT_FILE_LENGTH, FormContentType.fromString("application/jpeg"), null).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } 
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(toFluxByteBuffer(data), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies layout data for a document using source as input stream data. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies layout data for a pdf url */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlWithPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); pdfContentFromUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies that an exception is thrown for invalid status model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> assertThrows(ErrorResponseException.class, () -> client.beginRecognizeContentFromUrl(invalidSourceUrl).getSyncPoller())); } /** * Verifies layout data for a blank pdf url */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlWithEncodedBlankSpace(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner((fileUrl) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(fileUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, true, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * Verifies custom form data for a JPG content type with labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithJpgContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormJpgDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), FORM_1_JPG_FILE_LENGTH, FormContentType.IMAGE_JPEG, false, null).getSyncPoller(); syncPoller.waitForCompletion(); 
validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies custom form data for a blank PDF content type with labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithBlankPdfContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), BLANK_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id, * excluding text content. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataExcludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies an exception thrown for a document using null data value. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithNullFormData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms(null, syncPoller.getFinalResult().getModelId(), CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, true, null).getSyncPoller()); })); } /** * Verifies an exception thrown for a document using null model id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithNullModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms(toFluxByteBuffer(data), null, CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, true, null) .getSyncPoller()); assertEquals(EXPECTED_MODEL_ID_IS_REQUIRED_EXCEPTION_MESSAGE, ex.getMessage()); })); } /** * Verifies an exception thrown for an empty model id when recognizing custom form from URL. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithEmptyModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms(toFluxByteBuffer(data), "", CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, true, null).getSyncPoller()); assertEquals(EXPECTED_INVALID_UUID_EXCEPTION_MESSAGE, ex.getMessage()); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormInvalidStatus(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> beginTrainingLabeledRunner((training, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(training, useTrainingLabels) .getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); FormRecognizerException formRecognizerException = assertThrows(FormRecognizerException.class, () -> client.beginRecognizeCustomFormsFromUrl(invalidSourceUrl, createdModel.getModelId()) .getSyncPoller().getFinalResult()); ErrorInformation errorInformation = formRecognizerException.getErrorInformation().get(0); })); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(getReplayableBufferData(FORM_LOCAL_URL), trainingPoller.getFinalResult().getModelId(), CUSTOM_FORM_FILE_LENGTH, null, true, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * Verify custom form for a data stream of multi-page labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormMultiPageLabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion) .beginTraining(trainingFilesUrl, true).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataLabeled(syncPoller.getFinalResult()); })); } /** * 
Verifies custom form data for a document using source as input stream data and valid labeled model Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for a document using source as input stream data and valid include text content */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledDataIncludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), 
CUSTOM_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, true, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, false); })); } /** * Verify custom form for a data stream of multi-page unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormMultiPageUnlabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion) .beginTraining(trainingFilesUrl, false).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataUnlabeled(syncPoller.getFinalResult()); })); } /** * Verifies custom form data for a JPG content type with unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledDataWithJpgContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormJpgDataRunner(data -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = 
client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), FORM_1_JPG_FILE_LENGTH, FormContentType.IMAGE_JPEG, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for a blank PDF content type with unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledDataWithBlankPdfContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner(data -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), trainingPoller.getFinalResult().getModelId(), BLANK_FORM_FILE_LENGTH, FormContentType.APPLICATION_PDF, false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for an URL document data without labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlUnlabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlDataRunner(fileUrl -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, 
useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId(), false, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); }), FORM_JPG); } /** * Verifies custom form data for an URL document data without labeled data and include text content */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlUnlabeledDataIncludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlDataRunner(fileUrl -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId(), true, null).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, false); }), FORM_JPG); } /** * Verify custom form for an URL of multi-page unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlMultiPageUnlabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion) 
.beginTraining(trainingFilesUrl, false).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId()) .getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataUnlabeled(syncPoller.getFinalResult()); })); } /** * Verifies that an exception is thrown for invalid status model Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); StepVerifier.create(client.beginRecognizeCustomFormsFromUrl(INVALID_URL, createdModel.getModelId())) .verifyErrorSatisfies(throwable -> assertEquals(throwable.getMessage(), INVALID_SOURCE_URL_ERROR)); }); } /** * Verifies an exception thrown for a null model id when recognizing custom form from URL. 
*/
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.formrecognizer.TestUtils#getTestParameters")
    public void recognizeCustomFormFromUrlLabeledDataWithNullModelId(HttpClient httpClient,
        FormRecognizerServiceVersion serviceVersion) {
        client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
        multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> {
            SyncPoller<OperationResult, CustomFormModel> trainingPoller =
                getFormTrainingAsyncClient(httpClient, serviceVersion)
                    .beginTraining(trainingFilesUrl, true).getSyncPoller();
            trainingPoller.waitForCompletion();

            Exception ex = assertThrows(RuntimeException.class,
                () -> client.beginRecognizeCustomFormsFromUrl(fileUrl, null).getSyncPoller());
            assertEquals(EXPECTED_MODEL_ID_IS_REQUIRED_EXCEPTION_MESSAGE, ex.getMessage());
        }));
    }

    /**
     * Verifies an exception thrown for an empty model id for recognizing custom forms from URL.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.formrecognizer.TestUtils#getTestParameters")
    public void recognizeCustomFormFromUrlLabeledDataWithEmptyModelId(HttpClient httpClient,
        FormRecognizerServiceVersion serviceVersion) {
        client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
        multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> {
            SyncPoller<OperationResult, CustomFormModel> trainingPoller =
                getFormTrainingAsyncClient(httpClient, serviceVersion)
                    .beginTraining(trainingFilesUrl, true).getSyncPoller();
            trainingPoller.waitForCompletion();

            Exception ex = assertThrows(RuntimeException.class,
                () -> client.beginRecognizeCustomFormsFromUrl(fileUrl, "").getSyncPoller());
            assertEquals(EXPECTED_INVALID_UUID_EXCEPTION_MESSAGE, ex.getMessage());
        }));
    }

    /**
     * Verifies custom form data for an URL document data with labeled data
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.formrecognizer.TestUtils#getTestParameters")
    public void recognizeCustomFormUrlLabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) {
        client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
        urlDataRunner(fileUrl -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> {
            SyncPoller<OperationResult, CustomFormModel> trainingPoller =
                getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl,
                    useTrainingLabels).getSyncPoller();
            trainingPoller.waitForCompletion();

            SyncPoller<OperationResult, List<RecognizedForm>> syncPoller =
                client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId(),
                    false, null).getSyncPoller();
            syncPoller.waitForCompletion();
            validateRecognizedResult(syncPoller.getFinalResult(), false, true);
        }), FORM_JPG);
    }

    /**
     * Verifies custom form data for an URL document data with labeled data and include text content
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.formrecognizer.TestUtils#getTestParameters")
    public void recognizeCustomFormUrlLabeledDataIncludeTextContent(HttpClient httpClient,
        FormRecognizerServiceVersion serviceVersion) {
        client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
        urlDataRunner(fileUrl -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> {
            SyncPoller<OperationResult, CustomFormModel> trainingPoller =
                getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl,
                    useTrainingLabels).getSyncPoller();
            trainingPoller.waitForCompletion();

            SyncPoller<OperationResult, List<RecognizedForm>> syncPoller =
                client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId(),
                    true, null).getSyncPoller();
            syncPoller.waitForCompletion();
            validateRecognizedResult(syncPoller.getFinalResult(), true, true);
        }), FORM_JPG);
    }

    /**
     * Verify custom form for an URL of multi-page labeled data
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.formrecognizer.TestUtils#getTestParameters")
    public void recognizeCustomFormUrlMultiPageLabeled(HttpClient httpClient,
        FormRecognizerServiceVersion serviceVersion) {
        client = getFormRecognizerAsyncClient(httpClient, serviceVersion);
        multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> {
            SyncPoller<OperationResult, CustomFormModel> trainingPoller =
                getFormTrainingAsyncClient(httpClient, serviceVersion)
                    .beginTraining(trainingFilesUrl, true).getSyncPoller();
            trainingPoller.waitForCompletion();

            SyncPoller<OperationResult, List<RecognizedForm>> syncPoller =
                client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId())
                    .getSyncPoller();
            syncPoller.waitForCompletion();
            validateMultiPageDataLabeled(syncPoller.getFinalResult());
        }));
    }
}
class FormRecognizerAsyncClientTest extends FormRecognizerClientTestBase { private FormRecognizerAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private FormRecognizerAsyncClient getFormRecognizerAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormRecognizerClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } private FormTrainingAsyncClient getFormTrainingAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormTrainingClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verifies receipt data from a document using file data as source. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), dataLength, FormContentType.IMAGE_JPEG) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); assertThrows(RuntimeException.class, () -> client.beginRecognizeReceipts(null, RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG).getSyncPoller()); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts( new RecognizeOptions(getReplayableBufferData(RECEIPT_LOCAL_URL), RECEIPT_FILE_LENGTH) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); } /** * Verifies receipt data from a document using file data as source and including text content details. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataTextDetails(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunnerTextDetails((data, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts( new RecognizeOptions(toFluxByteBuffer(data), RECEIPT_FILE_LENGTH) .setFormContentType(FormContentType.IMAGE_JPEG).setIncludeTextContent(includeTextContent) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextContent); }); } /** * Verifies receipt data from a document using PNG file data as source and including text content details. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithPngFile(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptPngDataRunnerTextDetails((data, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(new RecognizeOptions(toFluxByteBuffer(data), RECEIPT_PNG_FILE_LENGTH) .setFormContentType(FormContentType.IMAGE_PNG).setIncludeTextContent(includeTextContent) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), true); }); } /** * Verifies receipt data from a document using blank PDF. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithBlankPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(new RecognizeOptions(toFluxByteBuffer(data), dataLength) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateBlankPdfResultData(syncPoller.getFinalResult()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts( new RecognizeOptions(toFluxByteBuffer(data), dataLength) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateMultipageReceiptData(syncPoller.getFinalResult()); }); } /** * Verifies receipt data for a document using source as file url. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies that an exception is thrown for invalid source url. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> { HttpResponseException errorResponseException = assertThrows(HttpResponseException.class, () -> client.beginRecognizeReceiptsFromUrl(new RecognizeOptions(invalidSourceUrl) .setPollInterval(durationTestMode)).getSyncPoller().getFinalResult()); ErrorInformation errorInformation = (ErrorInformation) errorResponseException.getValue(); assertEquals(INVALID_IMAGE_URL_ERROR_CODE, errorInformation.getCode()); assertEquals(IMAGE_URL_IS_BADLY_FORMATTED_ERROR_MESSAGE, errorInformation.getMessage()); }); } /** * Verifies receipt data for a document using source as file url and include content when includeTextContent is * true. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromUrlTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunnerTextDetails((sourceUrl, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl( new RecognizeOptions(sourceUrl).setIncludeTextContent(includeTextContent) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextContent); }); } /** * Verifies receipt data for a document using source as PNG file url and include content when includeTextContent is * true. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrlWithPngFile(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptPngSourceUrlRunnerTextDetails((sourceUrl, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl( new RecognizeOptions(sourceUrl).setIncludeTextContent(includeTextContent) .setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextContent); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts( new 
RecognizeOptions(fileUrl).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateMultipageReceiptData(syncPoller.getFinalResult()); }); } /** * Verifies layout data for a document using source as input stream data. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent( toFluxByteBuffer(data), dataLength, FormContentType.IMAGE_JPEG).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { assertThrows(RuntimeException.class, () -> client.beginRecognizeContent(null, LAYOUT_FILE_LENGTH, FormContentType.IMAGE_JPEG).getSyncPoller()); } /** * Verifies content type will be auto detected when using content/layout API with input stream data overload. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent( new RecognizeOptions(getReplayableBufferData(LAYOUT_LOCAL_URL), dataLength) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies blank form file is still a valid file to process */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner((data, dataLength) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent( new RecognizeOptions(toFluxByteBuffer(data), dataLength) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies layout data for a document using source as input stream data. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies layout data for a pdf url */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlWithPdf(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); pdfContentFromUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(new RecognizeOptions(sourceUrl).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies that an exception is thrown for invalid status model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> { HttpResponseException errorResponseException = assertThrows(HttpResponseException.class, () -> client.beginRecognizeContent(new RecognizeOptions(invalidSourceUrl) .setPollInterval(durationTestMode)).getSyncPoller().getFinalResult()); ErrorInformation errorInformation = (ErrorInformation) errorResponseException.getValue(); assertEquals(INVALID_IMAGE_URL_ERROR_CODE, errorInformation.getCode()); assertEquals(IMAGE_URL_IS_BADLY_FORMATTED_ERROR_MESSAGE, errorInformation.getMessage()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner((fileUrl) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent( new RecognizeOptions(fileUrl).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * Verifies custom form data for a JPG content type with labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithJpgContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormJpgDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), 
dataLength, trainingPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.IMAGE_JPEG) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies custom form data for a blank PDF content type with labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithBlankPdfContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.APPLICATION_PDF) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id, * excluding text content. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataExcludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.APPLICATION_PDF) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); })); } /** * Verifies an exception thrown for a document using null data value. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithNullFormData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); syncPoller.waitForCompletion(); assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions((InputStream) null, dataLength, syncPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(true).setPollInterval(durationTestMode)).getSyncPoller()); })); } /** * Verifies an exception thrown for a document using null model id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithNullModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> { Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, null).setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(durationTestMode)).getSyncPoller()); assertEquals(EXPECTED_MODEL_ID_IS_REQUIRED_EXCEPTION_MESSAGE, ex.getMessage()); }); } /** * Verifies an exception thrown for an empty model id when recognizing custom form from URL. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithEmptyModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> { Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, "") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(durationTestMode)).getSyncPoller()); assertEquals(EXPECTED_INVALID_UUID_EXCEPTION_MESSAGE, ex.getMessage()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormInvalidStatus(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> beginTrainingLabeledRunner((training, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(training, useTrainingLabels, null, durationTestMode).getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); FormRecognizerException formRecognizerException = assertThrows(FormRecognizerException.class, () -> client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( invalidSourceUrl, createdModel.getModelId()).setPollInterval(durationTestMode)) .getSyncPoller().getFinalResult()); ErrorInformation errorInformation = formRecognizerException.getErrorInformation().get(0); })); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(getReplayableBufferData(FORM_LOCAL_URL), dataLength, trainingPoller.getFinalResult().getModelId()) .setIncludeTextContent(true).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * Verify custom form for a data stream of multi-page labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormMultiPageLabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner((data, dataLength) -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, true, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) 
.setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataLabeled(syncPoller.getFinalResult()); })); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for a document using source as input stream data and valid include text content */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledDataIncludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner((data, dataLength) -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = 
getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) .setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(true).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, false); }) ); } /** * Verify custom form for a data stream of multi-page unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormMultiPageUnlabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner((data, dataLength) -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion) .beginTraining(trainingFilesUrl, false, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) .setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataUnlabeled(syncPoller.getFinalResult()); }) ); } /** * Verifies custom form data for a JPG content type with unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void 
recognizeCustomFormUnlabeledDataWithJpgContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormJpgDataRunner((data, dataLength) -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.IMAGE_JPEG) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for a blank PDF content type with unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledDataWithBlankPdfContentType(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); blankPdfDataRunner((data, dataLength) -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), dataLength, trainingPoller.getFinalResult().getModelId()) 
.setFormContentType(FormContentType.APPLICATION_PDF).setPollInterval(durationTestMode)) .getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } /** * Verifies custom form data for an URL document data without labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlUnlabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlRunner(fileUrl -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); }), FORM_JPG); } /** * Verifies custom form data for an URL document data without labeled data and include text content */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlUnlabeledDataIncludeTextContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlRunner(fileUrl -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); 
trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setIncludeTextContent(true).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, false); }), FORM_JPG); } /** * Verify custom form for an URL of multi-page unlabeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlMultiPageUnlabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, false, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataUnlabeled(syncPoller.getFinalResult()); })); } /** * Verifies that an exception is thrown for invalid status model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); StepVerifier.create(client.beginRecognizeCustomFormsFromUrl(INVALID_URL, createdModel.getModelId())) .verifyErrorSatisfies(throwable -> assertEquals(throwable.getMessage(), INVALID_SOURCE_URL_ERROR)); }); } /** * Verifies an exception thrown for a null model id when recognizing custom form from URL. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormFromUrlLabeledDataWithNullModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> { Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(fileUrl, null) .setPollInterval(durationTestMode)).getSyncPoller()); assertEquals(EXPECTED_MODEL_ID_IS_REQUIRED_EXCEPTION_MESSAGE, ex.getMessage()); }); } /** * Verifies an exception thrown for an empty model id for recognizing custom forms from URL. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormFromUrlLabeledDataWithEmptyModelId(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> { Exception ex = assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(fileUrl, "") .setPollInterval(durationTestMode)).getSyncPoller()); assertEquals(EXPECTED_INVALID_UUID_EXCEPTION_MESSAGE, ex.getMessage()); }); } /** * Verifies custom form data for an URL document data with labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlLabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlRunner(fileUrl -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, true); }), FORM_JPG); } /** * Verifies custom form data for an URL document data with labeled data and include text content */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlLabeledDataIncludeTextContent(HttpClient httpClient, 
FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); urlRunner(fileUrl -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setIncludeTextContent(true).setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); }), FORM_JPG); } /** * Verify custom form for an URL of multi-page labeled data */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlMultiPageLabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion) .beginTraining(trainingFilesUrl, true, null, durationTestMode).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(fileUrl, trainingPoller.getFinalResult().getModelId()) .setPollInterval(durationTestMode)).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataLabeled(syncPoller.getFinalResult()); })); } }
Should this print 'null/null' or just be empty strings?
void testUserAgentStringFormat() { String javaVersion = System.getProperty("java.version"); String osName = System.getProperty("os.name"); String osVersion = System.getProperty("os.version"); String plaform = new StringBuilder().append("(") .append(javaVersion).append("; ") .append(osName).append("; ") .append(osVersion).append(")") .toString(); assertEquals("azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString(null, "azure-storage-blob", "12.0.0", null)); assertEquals("myapp azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString("myapp", "azure-storage-blob", "12.0.0", null)); assertEquals("azsdk-java-azure-storage-blob/12.0.0", UserAgentUtil.toUserAgentString(null, "azure-storage-blob", "12.0.0", Configuration.getGlobalConfiguration().clone().put("AZURE_TELEMETRY_DISABLED", "true"))); assertEquals("myapp azsdk-java-azure-storage-blob/12.0.0", UserAgentUtil.toUserAgentString("myapp", "azure-storage-blob", "12.0.0", Configuration.getGlobalConfiguration().clone().put("AZURE_TELEMETRY_DISABLED", "true"))); assertEquals("ReallyLongApplicationIde azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString("ReallyLongApplicationIdentity", "azure-storage-blob", "12.0.0", null)); assertEquals("myapp azsdk-java-null/null " + plaform, UserAgentUtil.toUserAgentString("myapp", null, null, null)); }
assertEquals("myapp azsdk-java-null/null " + plaform,
void testUserAgentStringFormat() { String javaVersion = System.getProperty("java.version"); String osName = System.getProperty("os.name"); String osVersion = System.getProperty("os.version"); String plaform = new StringBuilder().append("(") .append(javaVersion).append("; ") .append(osName).append("; ") .append(osVersion).append(")") .toString(); assertEquals("azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString(null, "azure-storage-blob", "12.0.0", null)); assertEquals("myapp azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString("myapp", "azure-storage-blob", "12.0.0", null)); assertEquals("azsdk-java-azure-storage-blob/12.0.0", UserAgentUtil.toUserAgentString(null, "azure-storage-blob", "12.0.0", Configuration.getGlobalConfiguration().clone().put("AZURE_TELEMETRY_DISABLED", "true"))); assertEquals("myapp azsdk-java-azure-storage-blob/12.0.0", UserAgentUtil.toUserAgentString("myapp", "azure-storage-blob", "12.0.0", Configuration.getGlobalConfiguration().clone().put("AZURE_TELEMETRY_DISABLED", "true"))); assertEquals("ReallyLongApplicationIde azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString("ReallyLongApplicationIdentity", "azure-storage-blob", "12.0.0", null)); assertEquals("myapp azsdk-java-null/null " + plaform, UserAgentUtil.toUserAgentString("myapp", null, null, null)); }
class UserAgentUtilTest { @Test }
class UserAgentUtilTest { @Test }
This is the existing format and it might be useful in doing a regex match to extract fields from the user agent string.
void testUserAgentStringFormat() { String javaVersion = System.getProperty("java.version"); String osName = System.getProperty("os.name"); String osVersion = System.getProperty("os.version"); String plaform = new StringBuilder().append("(") .append(javaVersion).append("; ") .append(osName).append("; ") .append(osVersion).append(")") .toString(); assertEquals("azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString(null, "azure-storage-blob", "12.0.0", null)); assertEquals("myapp azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString("myapp", "azure-storage-blob", "12.0.0", null)); assertEquals("azsdk-java-azure-storage-blob/12.0.0", UserAgentUtil.toUserAgentString(null, "azure-storage-blob", "12.0.0", Configuration.getGlobalConfiguration().clone().put("AZURE_TELEMETRY_DISABLED", "true"))); assertEquals("myapp azsdk-java-azure-storage-blob/12.0.0", UserAgentUtil.toUserAgentString("myapp", "azure-storage-blob", "12.0.0", Configuration.getGlobalConfiguration().clone().put("AZURE_TELEMETRY_DISABLED", "true"))); assertEquals("ReallyLongApplicationIde azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString("ReallyLongApplicationIdentity", "azure-storage-blob", "12.0.0", null)); assertEquals("myapp azsdk-java-null/null " + plaform, UserAgentUtil.toUserAgentString("myapp", null, null, null)); }
assertEquals("myapp azsdk-java-null/null " + plaform,
void testUserAgentStringFormat() { String javaVersion = System.getProperty("java.version"); String osName = System.getProperty("os.name"); String osVersion = System.getProperty("os.version"); String plaform = new StringBuilder().append("(") .append(javaVersion).append("; ") .append(osName).append("; ") .append(osVersion).append(")") .toString(); assertEquals("azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString(null, "azure-storage-blob", "12.0.0", null)); assertEquals("myapp azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString("myapp", "azure-storage-blob", "12.0.0", null)); assertEquals("azsdk-java-azure-storage-blob/12.0.0", UserAgentUtil.toUserAgentString(null, "azure-storage-blob", "12.0.0", Configuration.getGlobalConfiguration().clone().put("AZURE_TELEMETRY_DISABLED", "true"))); assertEquals("myapp azsdk-java-azure-storage-blob/12.0.0", UserAgentUtil.toUserAgentString("myapp", "azure-storage-blob", "12.0.0", Configuration.getGlobalConfiguration().clone().put("AZURE_TELEMETRY_DISABLED", "true"))); assertEquals("ReallyLongApplicationIde azsdk-java-azure-storage-blob/12.0.0 " + plaform, UserAgentUtil.toUserAgentString("ReallyLongApplicationIdentity", "azure-storage-blob", "12.0.0", null)); assertEquals("myapp azsdk-java-null/null " + plaform, UserAgentUtil.toUserAgentString("myapp", null, null, null)); }
class UserAgentUtilTest { @Test }
class UserAgentUtilTest { @Test }
sourceUrl -> invalidSourceUrl
public void recognizeReceiptInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((sourceUrl) -> { ErrorResponseException errorResponseException = assertThrows(ErrorResponseException.class, () -> client.beginRecognizeReceiptsFromUrl(sourceUrl).getSyncPoller().getFinalResult()); assertEquals(INVALID_IMAGE_URL_ERROR_CODE, errorResponseException.getValue().getCode()); assertEquals(IMAGE_URL_IS_BADLY_FORMATTED_ERROR_MESSAGE, errorResponseException.getValue().getMessage()); }); }
invalidSourceUrlRunner((sourceUrl) -> {
public void recognizeReceiptInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> { HttpResponseException errorResponseException = assertThrows(HttpResponseException.class, () -> client.beginRecognizeReceiptsFromUrl(invalidSourceUrl).getSyncPoller().getFinalResult()); ErrorInformation errorInformation = (ErrorInformation) errorResponseException.getValue(); assertEquals(INVALID_IMAGE_URL_ERROR_CODE, errorInformation.getCode()); assertEquals(IMAGE_URL_IS_BADLY_FORMATTED_ERROR_MESSAGE, errorInformation.getMessage()); }); }
class FormRecognizerAsyncClientTest extends FormRecognizerClientTestBase { private FormRecognizerAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private FormRecognizerAsyncClient getFormRecognizerAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormRecognizerClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } private FormTrainingAsyncClient getFormTrainingAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormTrainingClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verifies receipt data for a document using source as file url. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies receipt data for a document using source as file url and include content when includeTextContent is * true. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrlTextDetails(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunnerTextDetails((sourceUrl, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(new RecognizeOptions(sourceUrl).setIncludeTextContent(includeTextContent)) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextContent); }); } /** * Verifies receipt data from a document using file data as source. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunner((data) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataTextDetailsWithNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); assertThrows(RuntimeException.class, () -> client.beginRecognizeReceipts(null, RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG).getSyncPoller()); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(getReplayableBufferData(RECEIPT_LOCAL_URL), RECEIPT_FILE_LENGTH, null) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); } /** * Verifies receipt data from a document using file data as source and including text content details. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataTextDetails(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunnerTextDetails((data, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(new RecognizeOptions(toFluxByteBuffer(data), RECEIPT_FILE_LENGTH) .setFormContentType(FormContentType.IMAGE_JPEG).setIncludeTextContent(includeTextContent)) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), true); }); } /** * Verifies that an exception is thrown for invalid source url. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils /** * Verifies layout data for a document using source as input stream data. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(toFluxByteBuffer(data), LAYOUT_FILE_LENGTH, FormContentType.IMAGE_JPEG) .getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { assertThrows(RuntimeException.class, () -> client.beginRecognizeContent(null, LAYOUT_FILE_LENGTH, FormContentType.IMAGE_JPEG).getSyncPoller()); } /** * Verifies content type will be auto detected when using content/layout API with input stream data overload. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(getReplayableBufferData(LAYOUT_LOCAL_URL), LAYOUT_FILE_LENGTH, null) .getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies layout data for a document using source as input stream data. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies that an exception is thrown for invalid status model Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> { ErrorResponseException errorResponseException = assertThrows(ErrorResponseException.class, () -> client.beginRecognizeContentFromUrl(invalidSourceUrl).getSyncPoller().getFinalResult()); assertEquals(INVALID_IMAGE_URL_ERROR_CODE, errorResponseException.getValue().getCode()); assertEquals(IMAGE_URL_IS_BADLY_FORMATTED_ERROR_MESSAGE, errorResponseException.getValue().getMessage()); }); } /** * Verifies that an exception is thrown for invalid status model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); StepVerifier.create(client.beginRecognizeCustomFormsFromUrl(INVALID_URL, createdModel.getModelId())) .verifyErrorSatisfies(throwable -> assertEquals(throwable.getMessage(), INVALID_SOURCE_URL_ERROR)); }); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(toFluxByteBuffer(data), CUSTOM_FORM_FILE_LENGTH, trainingPoller.getFinalResult().getModelId()) .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * 
Verifies an exception thrown for a document using null data value or null model id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithNullValues(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions((InputStream) null, CUSTOM_FORM_FILE_LENGTH, syncPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(true)).getSyncPoller()); assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), CUSTOM_FORM_FILE_LENGTH, null).setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(true)).getSyncPoller()); })); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(getReplayableBufferData(FORM_LOCAL_URL), CUSTOM_FORM_FILE_LENGTH, trainingPoller.getFinalResult().getModelId()).setFormContentType(null) .setIncludeTextContent(true)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(toFluxByteBuffer(data), CUSTOM_FORM_FILE_LENGTH, trainingPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(false)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormMultiPageUnlabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, false).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), MULTIPAGE_INVOICE_FILE_LENGTH, trainingPoller.getFinalResult().getModelId(), FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataUnlabeled(syncPoller.getFinalResult()); })); } 
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlMultiPageLabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, true).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId()).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataLabeled(syncPoller.getFinalResult()); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(fileUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateMultipageReceiptData(syncPoller.getFinalResult()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); 
validateMultipageReceiptData(syncPoller.getFinalResult()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner((fileUrl) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(fileUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(toFluxByteBuffer(data), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils void recognizeCustomFormInvalidStatus(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> beginTrainingLabeledRunner((training, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(training, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); FormRecognizerException formRecognizerException = assertThrows(FormRecognizerException.class, () -> 
client.beginRecognizeCustomFormsFromUrl(invalidSourceUrl, createdModel.getModelId()).getSyncPoller().getFinalResult()); ErrorInformation errorInformation = formRecognizerException.getErrorInformation().get(0); })); } }
class FormRecognizerAsyncClientTest extends FormRecognizerClientTestBase { private FormRecognizerAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private FormRecognizerAsyncClient getFormRecognizerAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormRecognizerClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } private FormTrainingAsyncClient getFormTrainingAsyncClient(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { return getFormTrainingClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verifies receipt data for a document using source as file url. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies receipt data for a document using source as file url and include content when includeTextContent is * true. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptSourceUrlTextDetails(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptSourceUrlRunnerTextDetails((sourceUrl, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(new RecognizeOptions(sourceUrl).setIncludeTextContent(includeTextContent)) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), includeTextContent); }); } /** * Verifies receipt data from a document using file data as source. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunner((data) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataTextDetailsWithNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); assertThrows(RuntimeException.class, () -> client.beginRecognizeReceipts(null, RECEIPT_FILE_LENGTH, FormContentType.IMAGE_JPEG).getSyncPoller()); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(getReplayableBufferData(RECEIPT_LOCAL_URL), RECEIPT_FILE_LENGTH, null) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), false); } /** * Verifies receipt data from a document using file data as source and including text content details. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptDataTextDetails(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); receiptDataRunnerTextDetails((data, includeTextContent) -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(new RecognizeOptions(toFluxByteBuffer(data), RECEIPT_FILE_LENGTH) .setFormContentType(FormContentType.IMAGE_JPEG).setIncludeTextContent(includeTextContent)) .getSyncPoller(); syncPoller.waitForCompletion(); validateReceiptResultData(syncPoller.getFinalResult(), true); }); } /** * Verifies that an exception is thrown for invalid source url. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils /** * Verifies layout data for a document using source as input stream data. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContent(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(toFluxByteBuffer(data), LAYOUT_FILE_LENGTH, FormContentType.IMAGE_JPEG) .getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies an exception thrown for a document using null data value. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithNullData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { assertThrows(RuntimeException.class, () -> client.beginRecognizeContent(null, LAYOUT_FILE_LENGTH, FormContentType.IMAGE_JPEG).getSyncPoller()); } /** * Verifies content type will be auto detected when using content/layout API with input stream data overload. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentResultWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromDataRunner((data) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(getReplayableBufferData(LAYOUT_LOCAL_URL), LAYOUT_FILE_LENGTH, null) .getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies layout data for a document using source as input stream data. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); contentFromUrlRunner(sourceUrl -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(sourceUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } /** * Verifies that an exception is thrown for invalid status model Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> { HttpResponseException errorResponseException = assertThrows(HttpResponseException.class, () -> client.beginRecognizeContentFromUrl(invalidSourceUrl).getSyncPoller().getFinalResult()); ErrorInformation errorInformation = (ErrorInformation) errorResponseException.getValue(); assertEquals(INVALID_IMAGE_URL_ERROR_CODE, errorInformation.getCode()); assertEquals(IMAGE_URL_IS_BADLY_FORMATTED_ERROR_MESSAGE, errorInformation.getMessage()); }); } /** * Verifies that an exception is thrown for invalid status model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormInvalidSourceUrl(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); StepVerifier.create(client.beginRecognizeCustomFormsFromUrl(INVALID_URL, createdModel.getModelId())) .verifyErrorSatisfies(throwable -> assertEquals(throwable.getMessage(), INVALID_SOURCE_URL_ERROR)); }); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(toFluxByteBuffer(data), CUSTOM_FORM_FILE_LENGTH, trainingPoller.getFinalResult().getModelId()) .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * 
Verifies an exception thrown for a document using null data value or null model id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithNullValues(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions((InputStream) null, CUSTOM_FORM_FILE_LENGTH, syncPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(true)).getSyncPoller()); assertThrows(RuntimeException.class, () -> client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(toFluxByteBuffer(data), CUSTOM_FORM_FILE_LENGTH, null).setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(true)).getSyncPoller()); })); } /** * Verifies content type will be auto detected when using custom form API with input stream data overload. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormLabeledDataWithContentTypeAutoDetection(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingLabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(getReplayableBufferData(FORM_LOCAL_URL), CUSTOM_FORM_FILE_LENGTH, trainingPoller.getFinalResult().getModelId()).setFormContentType(null) .setIncludeTextContent(true)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), true, true); })); } /** * Verifies custom form data for a document using source as input stream data and valid labeled model Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUnlabeledData(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); customFormDataRunner(data -> beginTrainingUnlabeledRunner((trainingFilesUrl, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, useTrainingLabels).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(toFluxByteBuffer(data), CUSTOM_FORM_FILE_LENGTH, trainingPoller.getFinalResult().getModelId()).setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(false)).getSyncPoller(); syncPoller.waitForCompletion(); validateRecognizedResult(syncPoller.getFinalResult(), false, false); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormMultiPageUnlabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, false).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(data), MULTIPAGE_INVOICE_FILE_LENGTH, trainingPoller.getFinalResult().getModelId(), FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataUnlabeled(syncPoller.getFinalResult()); })); } 
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeCustomFormUrlMultiPageLabeled(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> beginTrainingMultipageRunner((trainingFilesUrl) -> { SyncPoller<OperationResult, CustomFormModel> trainingPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(trainingFilesUrl, true).getSyncPoller(); trainingPoller.waitForCompletion(); SyncPoller<OperationResult, List<RecognizedForm>> syncPoller = client.beginRecognizeCustomFormsFromUrl(fileUrl, trainingPoller.getFinalResult().getModelId()).getSyncPoller(); syncPoller.waitForCompletion(); validateMultiPageDataLabeled(syncPoller.getFinalResult()); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner(fileUrl -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceiptsFromUrl(fileUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateMultipageReceiptData(syncPoller.getFinalResult()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeReceiptFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> { SyncPoller<OperationResult, List<RecognizedReceipt>> syncPoller = client.beginRecognizeReceipts(toFluxByteBuffer(data), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); 
validateMultipageReceiptData(syncPoller.getFinalResult()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromUrlMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromUrlRunner((fileUrl) -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContentFromUrl(fileUrl).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils public void recognizeContentFromDataMultiPage(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); multipageFromDataRunner(data -> { SyncPoller<OperationResult, List<FormPage>> syncPoller = client.beginRecognizeContent(toFluxByteBuffer(data), MULTIPAGE_INVOICE_FILE_LENGTH, FormContentType.APPLICATION_PDF).getSyncPoller(); syncPoller.waitForCompletion(); validateContentResultData(syncPoller.getFinalResult(), false); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.formrecognizer.TestUtils void recognizeCustomFormInvalidStatus(HttpClient httpClient, FormRecognizerServiceVersion serviceVersion) { client = getFormRecognizerAsyncClient(httpClient, serviceVersion); invalidSourceUrlRunner((invalidSourceUrl) -> beginTrainingLabeledRunner((training, useTrainingLabels) -> { SyncPoller<OperationResult, CustomFormModel> syncPoller = getFormTrainingAsyncClient(httpClient, serviceVersion).beginTraining(training, useTrainingLabels).getSyncPoller(); syncPoller.waitForCompletion(); CustomFormModel createdModel = syncPoller.getFinalResult(); FormRecognizerException formRecognizerException = assertThrows(FormRecognizerException.class, () -> 
client.beginRecognizeCustomFormsFromUrl(invalidSourceUrl, createdModel.getModelId()).getSyncPoller().getFinalResult()); })); } }
Here last `AsyncPollResponse` will be in `FAILED` state with error in `PollResult`. Unlike track1 client-runtime, it will not throw CloudException during the polling.
public void lroBasedOnAsyncOperationFailed() { ServerConfigure serverConfigure = new ServerConfigure(); final String resourceEndpoint = "/resource/1"; final String operationEndpoint = "/operations/1"; ResponseTransformer provisioningStateLroService = new ResponseTransformer() { private final int[] getCallCount = new int[1]; @Override public com.github.tomakehurst.wiremock.http.Response transform(Request request, com.github.tomakehurst.wiremock.http.Response response, FileSource fileSource, Parameters parameters) { if (!request.getUrl().endsWith(resourceEndpoint) && !request.getUrl().endsWith(operationEndpoint)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(500) .body("Unsupported path:" + request.getUrl()) .build(); } if (request.getMethod().isOneOf(RequestMethod.PUT)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .headers(new HttpHeaders( new HttpHeader("Azure-AsyncOperation", request.getAbsoluteUrl().replace(resourceEndpoint, operationEndpoint)))) .body(toJson(new FooWithProvisioningState("Creating"))) .status(201) .build(); } if (request.getMethod().isOneOf(RequestMethod.GET)) { if (request.getUrl().endsWith(operationEndpoint)) { getCallCount[0]++; if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .body("{\"status\": \"InProgress\"}") .build(); } else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .body("{\"status\": \"Failed\"}") .build(); } } else { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(400) .body("Invalid state:" + request.getUrl()) .build(); } } return response; } @Override public String getName() { return "LroService"; } }; WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint, operationEndpoint); lroServer.start(); try { final ProvisioningStateLroServiceClient client = 
RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); int[] onNextCallCount = new int[1]; AsyncPollResponse<PollResult<FooWithProvisioningState>, FooWithProvisioningState> pollResponse = lroFlux.doOnNext(response -> { PollResult<FooWithProvisioningState> pollResult = response.getValue(); Assertions.assertNotNull(pollResult); onNextCallCount[0]++; if (onNextCallCount[0] == 1) { Assertions.assertNotNull(pollResult.getValue()); Assertions.assertEquals(LongRunningOperationStatus.IN_PROGRESS, response.getStatus()); } else if (onNextCallCount[0] == 2) { Assertions.assertEquals(LongRunningOperationStatus.FAILED, response.getStatus()); } else { throw new IllegalStateException("Poller emitted more than expected value."); } }).blockLast(); Assertions.assertEquals(LongRunningOperationStatus.FAILED, pollResponse.getStatus()); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } }
Assertions.assertEquals(LongRunningOperationStatus.FAILED, pollResponse.getStatus());
public void lroBasedOnAsyncOperationFailed() { ServerConfigure serverConfigure = new ServerConfigure(); final String resourceEndpoint = "/resource/1"; final String operationEndpoint = "/operations/1"; ResponseTransformer provisioningStateLroService = new ResponseTransformer() { private final int[] getCallCount = new int[1]; @Override public com.github.tomakehurst.wiremock.http.Response transform(Request request, com.github.tomakehurst.wiremock.http.Response response, FileSource fileSource, Parameters parameters) { if (!request.getUrl().endsWith(resourceEndpoint) && !request.getUrl().endsWith(operationEndpoint)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(500) .body("Unsupported path:" + request.getUrl()) .build(); } if (request.getMethod().isOneOf(RequestMethod.PUT)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .headers(new HttpHeaders( new HttpHeader("Azure-AsyncOperation", request.getAbsoluteUrl().replace(resourceEndpoint, operationEndpoint)))) .body(toJson(new FooWithProvisioningState("Creating"))) .status(201) .build(); } if (request.getMethod().isOneOf(RequestMethod.GET)) { if (request.getUrl().endsWith(operationEndpoint)) { getCallCount[0]++; if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .body("{\"status\": \"InProgress\"}") .build(); } else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .body("{\"status\": \"Failed\"}") .build(); } } else { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(400) .body("Invalid state:" + request.getUrl()) .build(); } } return response; } @Override public String getName() { return "LroService"; } }; WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint, operationEndpoint); lroServer.start(); try { final ProvisioningStateLroServiceClient client = 
RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); int[] onNextCallCount = new int[1]; AsyncPollResponse<PollResult<FooWithProvisioningState>, FooWithProvisioningState> pollResponse = lroFlux.doOnNext(response -> { PollResult<FooWithProvisioningState> pollResult = response.getValue(); Assertions.assertNotNull(pollResult); onNextCallCount[0]++; if (onNextCallCount[0] == 1) { Assertions.assertNotNull(pollResult.getValue()); Assertions.assertEquals(LongRunningOperationStatus.IN_PROGRESS, response.getStatus()); } else if (onNextCallCount[0] == 2) { Assertions.assertEquals(LongRunningOperationStatus.FAILED, response.getStatus()); } else { throw new IllegalStateException("Poller emitted more than expected value."); } }).blockLast(); Assertions.assertEquals(LongRunningOperationStatus.FAILED, pollResponse.getStatus()); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } }
class LROPollerTests { private static final SerializerAdapter SERIALIZER = new AzureJacksonAdapter(); private static final Duration POLLING_DURATION = Duration.ofMillis(100); @BeforeEach public void beforeTest() { MockitoAnnotations.initMocks(this); } @AfterEach public void afterTest() { Mockito.framework().clearInlineMocks(); } @Host("http: @ServiceInterface(name = "ProvisioningStateLroService") interface ProvisioningStateLroServiceClient { @Put("/resource/1") Mono<Response<Flux<ByteBuffer>>> startLro(Context context); } @Test public void lroBasedOnProvisioningState() { WireMockServer lroServer = startServer(); try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); int[] onNextCallCount = new int[1]; lroFlux.doOnNext(response -> { PollResult<FooWithProvisioningState> pollResult = response.getValue(); Assertions.assertNotNull(pollResult); Assertions.assertNotNull(pollResult.getValue()); onNextCallCount[0]++; if (onNextCallCount[0] == 1) { Assertions.assertEquals(LongRunningOperationStatus.IN_PROGRESS, response.getStatus()); Assertions.assertNull(pollResult.getValue().getResourceId()); } else if (onNextCallCount[0] == 2) { Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus()); Assertions.assertNotNull(pollResult.getValue().getResourceId()); } else { throw new IllegalStateException("Poller emitted more than expected value."); } }).blockLast(); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } @Test public void lroBasedOnAsyncOperation() { ServerConfigure serverConfigure = new ServerConfigure(); final String resourceEndpoint = "/resource/1"; 
final String operationEndpoint = "/operations/1"; ResponseTransformer provisioningStateLroService = new ResponseTransformer() { private final int[] getCallCount = new int[1]; @Override public com.github.tomakehurst.wiremock.http.Response transform(Request request, com.github.tomakehurst.wiremock.http.Response response, FileSource fileSource, Parameters parameters) { if (!request.getUrl().endsWith(resourceEndpoint) && !request.getUrl().endsWith(operationEndpoint)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(500) .body("Unsupported path:" + request.getUrl()) .build(); } if (request.getMethod().isOneOf(RequestMethod.PUT)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .headers(new HttpHeaders( new HttpHeader("Azure-AsyncOperation", request.getAbsoluteUrl().replace(resourceEndpoint, operationEndpoint)))) .body(toJson(new FooWithProvisioningState("Creating"))) .status(201) .build(); } if (request.getMethod().isOneOf(RequestMethod.GET)) { if (request.getUrl().endsWith(operationEndpoint)) { getCallCount[0]++; if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .body("{\"status\": \"InProgress\"}") .build(); } else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .body("{\"status\": \"Succeeded\"}") .build(); } } else if (request.getUrl().endsWith(resourceEndpoint) && getCallCount[0] == serverConfigure.pollingCountTillSuccess) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .body(toJson(new FooWithProvisioningState("Succeeded", UUID.randomUUID().toString()))) .build(); } else { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(400) .body("Invalid state:" + request.getUrl()) .build(); } } return response; } @Override public String getName() { return "LroService"; } }; WireMockServer lroServer = 
createServer(provisioningStateLroService, resourceEndpoint, operationEndpoint); lroServer.start(); try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); int[] onNextCallCount = new int[1]; AsyncPollResponse<PollResult<FooWithProvisioningState>, FooWithProvisioningState> pollResponse = lroFlux.doOnNext(response -> { PollResult<FooWithProvisioningState> pollResult = response.getValue(); Assertions.assertNotNull(pollResult); Assertions.assertNotNull(pollResult.getValue()); onNextCallCount[0]++; if (onNextCallCount[0] == 1) { Assertions.assertEquals(LongRunningOperationStatus.IN_PROGRESS, response.getStatus()); } else if (onNextCallCount[0] == 2) { Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus()); } else { throw new IllegalStateException("Poller emitted more than expected value."); } }).blockLast(); FooWithProvisioningState foo = pollResponse.getFinalResult().block(); Assertions.assertNotNull(foo.getResourceId()); Assertions.assertEquals("Succeeded", foo.getProvisioningState()); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } @Test @Test public void lroSucceededNoPoll() { final String resourceEndpoint = "/resource/1"; final String sampleVaultUpdateSucceededResponse = "{\"id\":\"/subscriptions/ ResponseTransformer provisioningStateLroService = new ResponseTransformer() { @Override public com.github.tomakehurst.wiremock.http.Response transform(Request request, com.github.tomakehurst.wiremock.http.Response response, FileSource fileSource, Parameters parameters) { if (!request.getUrl().endsWith(resourceEndpoint)) { return new 
com.github.tomakehurst.wiremock.http.Response.Builder() .status(500) .body("Unsupported path:" + request.getUrl()) .build(); } if (request.getMethod().isOneOf(RequestMethod.PUT)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(200) .body(sampleVaultUpdateSucceededResponse) .build(); } return response; } @Override public String getName() { return "LroService"; } }; WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint); lroServer.start(); try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<Resource>, Resource> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), Resource.class, Resource.class, POLLING_DURATION, newLroInitFunction(client)); StepVerifier.create(lroFlux) .expectSubscription() .expectNextMatches(response -> { PollResult<Resource> pollResult = response.getValue(); return response.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED && pollResult != null && pollResult.getValue() != null && pollResult.getValue().id() != null; }).verifyComplete(); AsyncPollResponse<PollResult<Resource>, Resource> asyncPollResponse = lroFlux.blockLast(); Assertions.assertNotNull(asyncPollResponse); Resource result = asyncPollResponse.getFinalResult().block(); Assertions.assertNotNull(result); Assertions.assertNotNull(result.id()); Assertions.assertEquals("v1weidxu", result.name()); Assertions.assertEquals("Microsoft.KeyVault/vaults", result.type()); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } @Test public void lroTimeout() { final Duration timeoutDuration = Duration.ofMillis(1000); final String resourceEndpoint = "/resource/1"; final AtomicInteger getCallCount = new AtomicInteger(0); ResponseTransformer provisioningStateLroService = new ResponseTransformer() { @Override public com.github.tomakehurst.wiremock.http.Response 
transform(Request request, com.github.tomakehurst.wiremock.http.Response response, FileSource fileSource, Parameters parameters) { if (!request.getUrl().endsWith(resourceEndpoint)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(500) .body("Unsupported path:" + request.getUrl()) .build(); } if (request.getMethod().isOneOf(RequestMethod.PUT, RequestMethod.GET)) { if (request.getMethod().isOneOf(RequestMethod.GET)) { getCallCount.getAndIncrement(); } return new com.github.tomakehurst.wiremock.http.Response.Builder() .body(toJson(new FooWithProvisioningState("IN_PROGRESS"))) .build(); } return response; } @Override public String getName() { return "LroService"; } }; WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint); lroServer.start(); try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); Mono<FooWithProvisioningState> resultMonoWithTimeout = lroFlux.last() .flatMap(AsyncPollResponse::getFinalResult) .timeout(timeoutDuration); StepVerifier.create(resultMonoWithTimeout) .thenAwait() .verifyError(TimeoutException.class); int count = getCallCount.get(); try { Thread.sleep(timeoutDuration.toMillis()); } catch (InterruptedException e) { } Assertions.assertEquals(count, getCallCount.get()); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } @Test public void lroRetryAfter() { ServerConfigure configure = new ServerConfigure(); Duration expectedPollingDuration = Duration.ofSeconds(3); configure.pollingCountTillSuccess = 3; configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1")); WireMockServer 
lroServer = startServer(configure); try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); long nanoTime = System.nanoTime(); FooWithProvisioningState result = lroFlux .doOnNext(response -> { System.out.println(String.format("[%s] status %s", OffsetDateTime.now().toString(), response.getStatus().toString())); }).blockLast() .getFinalResult().block(); Assertions.assertNotNull(result); Duration pollingDuration = Duration.ofNanos(System.nanoTime() - nanoTime); Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } @Test public void lroContext() { WireMockServer lroServer = startServer(); HttpPipelinePolicy contextVerifyPolicy = (context, next) -> { Optional<Object> valueOpt = context.getData("key1"); if (valueOpt.isPresent() && "value1".equals(valueOpt.get())) { return next.process(); } else { return Mono.error(new AssertionError()); } }; try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port(), Collections.singletonList(contextVerifyPolicy)), SERIALIZER); Flux<AsyncPollResponse<PollResult<FooWithProvisioningState>, FooWithProvisioningState>> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); lroFlux = lroFlux.subscriberContext(context -> context.put("key1", "value1")); FooWithProvisioningState result = lroFlux .blockLast() .getFinalResult() .block(); Assertions.assertNotNull(result); 
} finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } private static class ServerConfigure { private int pollingCountTillSuccess = 2; private HttpHeaders additionalHeaders = HttpHeaders.noHeaders(); } private static WireMockServer startServer() { return startServer(new ServerConfigure()); } private static WireMockServer startServer(ServerConfigure serverConfigure) { final String resourceEndpoint = "/resource/1"; ResponseTransformer provisioningStateLroService = new ResponseTransformer() { private final int[] getCallCount = new int[1]; @Override public com.github.tomakehurst.wiremock.http.Response transform(Request request, com.github.tomakehurst.wiremock.http.Response response, FileSource fileSource, Parameters parameters) { if (!request.getUrl().endsWith(resourceEndpoint)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(500) .body("Unsupported path:" + request.getUrl()) .build(); } if (request.getMethod().isOneOf(RequestMethod.PUT)) { System.out.println(String.format("[%s] PUT status %s", OffsetDateTime.now().toString(), "IN_PROGRESS")); return new com.github.tomakehurst.wiremock.http.Response.Builder() .headers(serverConfigure.additionalHeaders) .body(toJson(new FooWithProvisioningState("IN_PROGRESS"))) .build(); } if (request.getMethod().isOneOf(RequestMethod.GET)) { getCallCount[0]++; if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) { System.out.println(String.format("[%s] GET status %s", OffsetDateTime.now().toString(), "IN_PROGRESS")); return new com.github.tomakehurst.wiremock.http.Response.Builder() .headers(serverConfigure.additionalHeaders) .body(toJson(new FooWithProvisioningState("IN_PROGRESS"))) .build(); } else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) { System.out.println(String.format("[%s] GET status %s", OffsetDateTime.now().toString(), "SUCCEEDED")); return new com.github.tomakehurst.wiremock.http.Response.Builder() .body(toJson(new 
FooWithProvisioningState("SUCCEEDED", UUID.randomUUID().toString()))) .build(); } } return response; } @Override public String getName() { return "LroService"; } }; WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint); lroServer.start(); return lroServer; } private static WireMockServer createServer(ResponseTransformer transformer, String... endpoints) { WireMockServer server = new WireMockServer(WireMockConfiguration .options() .dynamicPort() .extensions(transformer) .disableRequestJournal()); for (String endpoint : endpoints) { server.stubFor(WireMock.any(WireMock.urlEqualTo(endpoint)) .willReturn(WireMock.aResponse())); } return server; } private static HttpPipeline createHttpPipeline(int port) { return createHttpPipeline(port, Collections.emptyList()); } private static HttpPipeline createHttpPipeline(int port, List<HttpPipelinePolicy> additionalPolicies) { List<HttpPipelinePolicy> policies = new ArrayList<>(additionalPolicies); policies.add(new HttpPipelinePolicy() { @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest request = context.getHttpRequest(); request.setUrl(updatePort(request.getUrl(), port)); context.setHttpRequest(request); return next.process(); } private URL updatePort(URL url, int port) { try { return new URL(url.getProtocol(), url.getHost(), port, url.getFile()); } catch (MalformedURLException mue) { throw new RuntimeException(mue); } } }); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } private Mono<Response<Flux<ByteBuffer>>> newLroInitFunction(ProvisioningStateLroServiceClient client) { return FluxUtil.fluxContext(context -> client.startLro(context).flux()).next(); } private static String toJson(Object object) { try { return SERIALIZER.serialize(object, SerializerEncoding.JSON); } catch (IOException ioe) { throw new RuntimeException(ioe); } } }
class LROPollerTests { private static final SerializerAdapter SERIALIZER = new AzureJacksonAdapter(); private static final Duration POLLING_DURATION = Duration.ofMillis(100); @BeforeEach public void beforeTest() { MockitoAnnotations.initMocks(this); } @AfterEach public void afterTest() { Mockito.framework().clearInlineMocks(); } @Host("http: @ServiceInterface(name = "ProvisioningStateLroService") interface ProvisioningStateLroServiceClient { @Put("/resource/1") Mono<Response<Flux<ByteBuffer>>> startLro(Context context); } @Test public void lroBasedOnProvisioningState() { WireMockServer lroServer = startServer(); try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); int[] onNextCallCount = new int[1]; lroFlux.doOnNext(response -> { PollResult<FooWithProvisioningState> pollResult = response.getValue(); Assertions.assertNotNull(pollResult); Assertions.assertNotNull(pollResult.getValue()); onNextCallCount[0]++; if (onNextCallCount[0] == 1) { Assertions.assertEquals(LongRunningOperationStatus.IN_PROGRESS, response.getStatus()); Assertions.assertNull(pollResult.getValue().getResourceId()); } else if (onNextCallCount[0] == 2) { Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus()); Assertions.assertNotNull(pollResult.getValue().getResourceId()); } else { throw new IllegalStateException("Poller emitted more than expected value."); } }).blockLast(); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } @Test public void lroBasedOnAsyncOperation() { ServerConfigure serverConfigure = new ServerConfigure(); final String resourceEndpoint = "/resource/1"; 
final String operationEndpoint = "/operations/1"; ResponseTransformer provisioningStateLroService = new ResponseTransformer() { private final int[] getCallCount = new int[1]; @Override public com.github.tomakehurst.wiremock.http.Response transform(Request request, com.github.tomakehurst.wiremock.http.Response response, FileSource fileSource, Parameters parameters) { if (!request.getUrl().endsWith(resourceEndpoint) && !request.getUrl().endsWith(operationEndpoint)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(500) .body("Unsupported path:" + request.getUrl()) .build(); } if (request.getMethod().isOneOf(RequestMethod.PUT)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .headers(new HttpHeaders( new HttpHeader("Azure-AsyncOperation", request.getAbsoluteUrl().replace(resourceEndpoint, operationEndpoint)))) .body(toJson(new FooWithProvisioningState("Creating"))) .status(201) .build(); } if (request.getMethod().isOneOf(RequestMethod.GET)) { if (request.getUrl().endsWith(operationEndpoint)) { getCallCount[0]++; if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .body("{\"status\": \"InProgress\"}") .build(); } else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .body("{\"status\": \"Succeeded\"}") .build(); } } else if (request.getUrl().endsWith(resourceEndpoint) && getCallCount[0] == serverConfigure.pollingCountTillSuccess) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .body(toJson(new FooWithProvisioningState("Succeeded", UUID.randomUUID().toString()))) .build(); } else { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(400) .body("Invalid state:" + request.getUrl()) .build(); } } return response; } @Override public String getName() { return "LroService"; } }; WireMockServer lroServer = 
createServer(provisioningStateLroService, resourceEndpoint, operationEndpoint); lroServer.start(); try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); int[] onNextCallCount = new int[1]; AsyncPollResponse<PollResult<FooWithProvisioningState>, FooWithProvisioningState> pollResponse = lroFlux.doOnNext(response -> { PollResult<FooWithProvisioningState> pollResult = response.getValue(); Assertions.assertNotNull(pollResult); Assertions.assertNotNull(pollResult.getValue()); onNextCallCount[0]++; if (onNextCallCount[0] == 1) { Assertions.assertEquals(LongRunningOperationStatus.IN_PROGRESS, response.getStatus()); } else if (onNextCallCount[0] == 2) { Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus()); } else { throw new IllegalStateException("Poller emitted more than expected value."); } }).blockLast(); FooWithProvisioningState foo = pollResponse.getFinalResult().block(); Assertions.assertNotNull(foo.getResourceId()); Assertions.assertEquals("Succeeded", foo.getProvisioningState()); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } @Test @Test public void lroSucceededNoPoll() { final String resourceEndpoint = "/resource/1"; final String sampleVaultUpdateSucceededResponse = "{\"id\":\"/subscriptions/ ResponseTransformer provisioningStateLroService = new ResponseTransformer() { @Override public com.github.tomakehurst.wiremock.http.Response transform(Request request, com.github.tomakehurst.wiremock.http.Response response, FileSource fileSource, Parameters parameters) { if (!request.getUrl().endsWith(resourceEndpoint)) { return new 
com.github.tomakehurst.wiremock.http.Response.Builder() .status(500) .body("Unsupported path:" + request.getUrl()) .build(); } if (request.getMethod().isOneOf(RequestMethod.PUT)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(200) .body(sampleVaultUpdateSucceededResponse) .build(); } return response; } @Override public String getName() { return "LroService"; } }; WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint); lroServer.start(); try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<Resource>, Resource> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), Resource.class, Resource.class, POLLING_DURATION, newLroInitFunction(client)); StepVerifier.create(lroFlux) .expectSubscription() .expectNextMatches(response -> { PollResult<Resource> pollResult = response.getValue(); return response.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED && pollResult != null && pollResult.getValue() != null && pollResult.getValue().id() != null; }).verifyComplete(); AsyncPollResponse<PollResult<Resource>, Resource> asyncPollResponse = lroFlux.blockLast(); Assertions.assertNotNull(asyncPollResponse); Resource result = asyncPollResponse.getFinalResult().block(); Assertions.assertNotNull(result); Assertions.assertNotNull(result.id()); Assertions.assertEquals("v1weidxu", result.name()); Assertions.assertEquals("Microsoft.KeyVault/vaults", result.type()); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } @Test public void lroTimeout() { final Duration timeoutDuration = Duration.ofMillis(1000); final String resourceEndpoint = "/resource/1"; final AtomicInteger getCallCount = new AtomicInteger(0); ResponseTransformer provisioningStateLroService = new ResponseTransformer() { @Override public com.github.tomakehurst.wiremock.http.Response 
transform(Request request, com.github.tomakehurst.wiremock.http.Response response, FileSource fileSource, Parameters parameters) { if (!request.getUrl().endsWith(resourceEndpoint)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(500) .body("Unsupported path:" + request.getUrl()) .build(); } if (request.getMethod().isOneOf(RequestMethod.PUT, RequestMethod.GET)) { if (request.getMethod().isOneOf(RequestMethod.GET)) { getCallCount.getAndIncrement(); } return new com.github.tomakehurst.wiremock.http.Response.Builder() .body(toJson(new FooWithProvisioningState("IN_PROGRESS"))) .build(); } return response; } @Override public String getName() { return "LroService"; } }; WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint); lroServer.start(); try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); Mono<FooWithProvisioningState> resultMonoWithTimeout = lroFlux.last() .flatMap(AsyncPollResponse::getFinalResult) .timeout(timeoutDuration); StepVerifier.create(resultMonoWithTimeout) .thenAwait() .verifyError(TimeoutException.class); int count = getCallCount.get(); try { Thread.sleep(timeoutDuration.toMillis()); } catch (InterruptedException e) { } Assertions.assertEquals(count, getCallCount.get()); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } @Test public void lroRetryAfter() { ServerConfigure configure = new ServerConfigure(); Duration expectedPollingDuration = Duration.ofSeconds(3); configure.pollingCountTillSuccess = 3; configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1")); WireMockServer 
lroServer = startServer(configure); try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port()), SERIALIZER); PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); long nanoTime = System.nanoTime(); FooWithProvisioningState result = lroFlux .doOnNext(response -> { System.out.println(String.format("[%s] status %s", OffsetDateTime.now().toString(), response.getStatus().toString())); }).blockLast() .getFinalResult().block(); Assertions.assertNotNull(result); Duration pollingDuration = Duration.ofNanos(System.nanoTime() - nanoTime); Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0); } finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } @Test public void lroContext() { WireMockServer lroServer = startServer(); HttpPipelinePolicy contextVerifyPolicy = (context, next) -> { Optional<Object> valueOpt = context.getData("key1"); if (valueOpt.isPresent() && "value1".equals(valueOpt.get())) { return next.process(); } else { return Mono.error(new AssertionError()); } }; try { final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class, createHttpPipeline(lroServer.port(), Collections.singletonList(contextVerifyPolicy)), SERIALIZER); Flux<AsyncPollResponse<PollResult<FooWithProvisioningState>, FooWithProvisioningState>> lroFlux = PollerFactory.create(SERIALIZER, new HttpPipelineBuilder().build(), FooWithProvisioningState.class, FooWithProvisioningState.class, POLLING_DURATION, newLroInitFunction(client)); lroFlux = lroFlux.subscriberContext(context -> context.put("key1", "value1")); FooWithProvisioningState result = lroFlux .blockLast() .getFinalResult() .block(); Assertions.assertNotNull(result); 
} finally { if (lroServer.isRunning()) { lroServer.shutdown(); } } } private static class ServerConfigure { private int pollingCountTillSuccess = 2; private HttpHeaders additionalHeaders = HttpHeaders.noHeaders(); } private static WireMockServer startServer() { return startServer(new ServerConfigure()); } private static WireMockServer startServer(ServerConfigure serverConfigure) { final String resourceEndpoint = "/resource/1"; ResponseTransformer provisioningStateLroService = new ResponseTransformer() { private final int[] getCallCount = new int[1]; @Override public com.github.tomakehurst.wiremock.http.Response transform(Request request, com.github.tomakehurst.wiremock.http.Response response, FileSource fileSource, Parameters parameters) { if (!request.getUrl().endsWith(resourceEndpoint)) { return new com.github.tomakehurst.wiremock.http.Response.Builder() .status(500) .body("Unsupported path:" + request.getUrl()) .build(); } if (request.getMethod().isOneOf(RequestMethod.PUT)) { System.out.println(String.format("[%s] PUT status %s", OffsetDateTime.now().toString(), "IN_PROGRESS")); return new com.github.tomakehurst.wiremock.http.Response.Builder() .headers(serverConfigure.additionalHeaders) .body(toJson(new FooWithProvisioningState("IN_PROGRESS"))) .build(); } if (request.getMethod().isOneOf(RequestMethod.GET)) { getCallCount[0]++; if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) { System.out.println(String.format("[%s] GET status %s", OffsetDateTime.now().toString(), "IN_PROGRESS")); return new com.github.tomakehurst.wiremock.http.Response.Builder() .headers(serverConfigure.additionalHeaders) .body(toJson(new FooWithProvisioningState("IN_PROGRESS"))) .build(); } else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) { System.out.println(String.format("[%s] GET status %s", OffsetDateTime.now().toString(), "SUCCEEDED")); return new com.github.tomakehurst.wiremock.http.Response.Builder() .body(toJson(new 
FooWithProvisioningState("SUCCEEDED", UUID.randomUUID().toString()))) .build(); } } return response; } @Override public String getName() { return "LroService"; } }; WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint); lroServer.start(); return lroServer; } private static WireMockServer createServer(ResponseTransformer transformer, String... endpoints) { WireMockServer server = new WireMockServer(WireMockConfiguration .options() .dynamicPort() .extensions(transformer) .disableRequestJournal()); for (String endpoint : endpoints) { server.stubFor(WireMock.any(WireMock.urlEqualTo(endpoint)) .willReturn(WireMock.aResponse())); } return server; } private static HttpPipeline createHttpPipeline(int port) { return createHttpPipeline(port, Collections.emptyList()); } private static HttpPipeline createHttpPipeline(int port, List<HttpPipelinePolicy> additionalPolicies) { List<HttpPipelinePolicy> policies = new ArrayList<>(additionalPolicies); policies.add(new HttpPipelinePolicy() { @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest request = context.getHttpRequest(); request.setUrl(updatePort(request.getUrl(), port)); context.setHttpRequest(request); return next.process(); } private URL updatePort(URL url, int port) { try { return new URL(url.getProtocol(), url.getHost(), port, url.getFile()); } catch (MalformedURLException mue) { throw new RuntimeException(mue); } } }); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } private Mono<Response<Flux<ByteBuffer>>> newLroInitFunction(ProvisioningStateLroServiceClient client) { return FluxUtil.fluxContext(context -> client.startLro(context).flux()).next(); } private static String toJson(Object object) { try { return SERIALIZER.serialize(object, SerializerEncoding.JSON); } catch (IOException ioe) { throw new RuntimeException(ioe); } } }
Why is this logger.warn() and not logger.info(), is this intentional?
public Mono<ShouldRetryResult> shouldRetry(Exception exception) { CosmosException exceptionToThrow = null; Duration backoffTime = Duration.ofSeconds(0); Duration timeout = Duration.ofSeconds(0); boolean forceRefreshAddressCache = false; if (!(exception instanceof GoneException) && !(exception instanceof RetryWithException) && !(exception instanceof PartitionIsMigratingException) && !(exception instanceof InvalidPartitionException && (this.request.getPartitionKeyRangeIdentity() == null || this.request.getPartitionKeyRangeIdentity().getCollectionRid() == null)) && !(exception instanceof PartitionKeyRangeIsSplittingException)) { logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount, exception); stopStopWatch(this.durationTimer); return Mono.just(ShouldRetryResult.noRetry()); } else if (exception instanceof RetryWithException) { this.lastRetryWithException = (RetryWithException) exception; } long remainingSeconds = this.waitTimeInSeconds - this.durationTimer.getTime() / 1000; int currentRetryAttemptCount = this.attemptCount; if (this.attemptCount++ > 1) { if (remainingSeconds <= 0) { if (exception instanceof GoneException) { if (this.lastRetryWithException != null) { logger.warn( "Received gone exception after backoff/retry including at least one RetryWithException. " + "Will fail the request with RetryWithException. GoneException: {}. RetryWithException: {}", exception, this.lastRetryWithException); exceptionToThrow = this.lastRetryWithException; } else { logger.warn("Received gone exception after backoff/retry. Will fail the request. 
{}, short stackTrace = [{}]", exception.toString(), Utils.limitedStackTrace(exception, LIMITED_STACK_TRACE_FAILURE_DEPTH)); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else if (exception instanceof PartitionKeyRangeGoneException) { if (this.lastRetryWithException != null) { logger.warn( "Received partition key range gone exception after backoff/retry including at least one RetryWithException." + "Will fail the request with RetryWithException. GoneException: {}. RetryWithException: {}", exception, this.lastRetryWithException); exceptionToThrow = this.lastRetryWithException; } else { logger.warn( "Received partition key range gone exception after backoff/retry. Will fail the request. {}", exception.toString()); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else if (exception instanceof InvalidPartitionException) { if (this.lastRetryWithException != null) { logger.warn( "Received InvalidPartitionException after backoff/retry including at least one RetryWithException. " + "Will fail the request with RetryWithException. InvalidPartitionException: {}. RetryWithException: {}", exception, this.lastRetryWithException); } else { logger.warn( "Received invalid collection partition exception after backoff/retry. Will fail the request. {}", exception.toString()); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else { logger.warn("Received retrywith exception after backoff/retry. Will fail the request. 
{}", exception.toString()); } stopStopWatch(this.durationTimer); return Mono.just(ShouldRetryResult.error(exceptionToThrow)); } backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds, remainingSeconds), GoneAndRetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS)); this.currentBackoffSeconds *= GoneAndRetryWithRetryPolicy.BACK_OFF_MULTIPLIER; logger.info("BackoffTime: {} seconds.", backoffTime.getSeconds()); } long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis(); timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec) : Duration.ofSeconds(GoneAndRetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS); if (exception instanceof GoneException) { logger.info("Received gone exception, will retry, {}", exception.toString()); forceRefreshAddressCache = true; } else if (exception instanceof PartitionIsMigratingException) { logger.warn("Received PartitionIsMigratingException, will retry, {}", exception.toString()); this.request.forceCollectionRoutingMapRefresh = true; forceRefreshAddressCache = true; } else if (exception instanceof InvalidPartitionException) { this.request.requestContext.quorumSelectedLSN = -1; this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedStoreResponse = null; this.request.requestContext.globalCommittedSelectedLSN = -1; if (this.attemptCountInvalidPartition++ > 2) { logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. 
{}", exception.toString()); return Mono.just(ShouldRetryResult .error(BridgeInternal.createServiceUnavailableException(exception))); } if (this.request != null) { logger.warn("Received invalid collection exception, will retry, {}", exception.toString()); this.request.forceNameCacheRefresh = true; } else { logger.error("Received unexpected invalid collection exception, request should be non-null.", exception); return Mono.just(ShouldRetryResult .error(BridgeInternal.createCosmosException(HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, exception))); } forceRefreshAddressCache = false; } else if (exception instanceof PartitionKeyRangeIsSplittingException) { this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedLSN = -1; this.request.requestContext.quorumSelectedStoreResponse = null; logger.info("Received partition key range splitting exception, will retry, {}", exception.toString()); this.request.forcePartitionKeyRangeRefresh = true; forceRefreshAddressCache = false; } else { logger.warn("Received retrywith exception, will retry, {}", exception); forceRefreshAddressCache = false; } return Mono.just(ShouldRetryResult.retryAfter(backoffTime, Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount))); }
logger.warn("Received gone exception after backoff/retry. Will fail the request. {}, short stackTrace = [{}]",
public Mono<ShouldRetryResult> shouldRetry(Exception exception) { CosmosException exceptionToThrow = null; Duration backoffTime = Duration.ofSeconds(0); Duration timeout = Duration.ofSeconds(0); boolean forceRefreshAddressCache = false; if (!(exception instanceof GoneException) && !(exception instanceof RetryWithException) && !(exception instanceof PartitionIsMigratingException) && !(exception instanceof InvalidPartitionException && (this.request.getPartitionKeyRangeIdentity() == null || this.request.getPartitionKeyRangeIdentity().getCollectionRid() == null)) && !(exception instanceof PartitionKeyRangeIsSplittingException)) { logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount, exception); stopStopWatch(this.durationTimer); return Mono.just(ShouldRetryResult.noRetry()); } else if (exception instanceof RetryWithException) { this.lastRetryWithException = (RetryWithException) exception; } long remainingSeconds = this.waitTimeInSeconds - this.durationTimer.getTime() / 1000; int currentRetryAttemptCount = this.attemptCount; if (this.attemptCount++ > 1) { if (remainingSeconds <= 0) { if (exception instanceof GoneException) { if (this.lastRetryWithException != null) { logger.warn( "Received gone exception after backoff/retry including at least one RetryWithException. " + "Will fail the request with RetryWithException. GoneException: {}. RetryWithException: {}", exception, this.lastRetryWithException); exceptionToThrow = this.lastRetryWithException; } else { logger.warn("Received gone exception after backoff/retry. Will fail the request. {}", exception.toString()); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else if (exception instanceof PartitionKeyRangeGoneException) { if (this.lastRetryWithException != null) { logger.warn( "Received partition key range gone exception after backoff/retry including at least one RetryWithException." + "Will fail the request with RetryWithException. 
GoneException: {}. RetryWithException: {}", exception, this.lastRetryWithException); exceptionToThrow = this.lastRetryWithException; } else { logger.warn( "Received partition key range gone exception after backoff/retry. Will fail the request. {}", exception.toString()); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else if (exception instanceof InvalidPartitionException) { if (this.lastRetryWithException != null) { logger.warn( "Received InvalidPartitionException after backoff/retry including at least one RetryWithException. " + "Will fail the request with RetryWithException. InvalidPartitionException: {}. RetryWithException: {}", exception, this.lastRetryWithException); } else { logger.warn( "Received invalid collection partition exception after backoff/retry. Will fail the request. {}", exception.toString()); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else { logger.warn("Received retrywith exception after backoff/retry. Will fail the request. {}", exception.toString()); } stopStopWatch(this.durationTimer); return Mono.just(ShouldRetryResult.error(exceptionToThrow)); } backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds, remainingSeconds), GoneAndRetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS)); this.currentBackoffSeconds *= GoneAndRetryWithRetryPolicy.BACK_OFF_MULTIPLIER; logger.info("BackoffTime: {} seconds.", backoffTime.getSeconds()); } long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis(); timeout = timeoutInMillSec > 0 ? 
Duration.ofMillis(timeoutInMillSec) : Duration.ofSeconds(GoneAndRetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS); if (exception instanceof GoneException) { logger.info("Received gone exception, will retry, {}", exception.toString()); forceRefreshAddressCache = true; } else if (exception instanceof PartitionIsMigratingException) { logger.warn("Received PartitionIsMigratingException, will retry, {}", exception.toString()); this.request.forceCollectionRoutingMapRefresh = true; forceRefreshAddressCache = true; } else if (exception instanceof InvalidPartitionException) { this.request.requestContext.quorumSelectedLSN = -1; this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedStoreResponse = null; this.request.requestContext.globalCommittedSelectedLSN = -1; if (this.attemptCountInvalidPartition++ > 2) { logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. {}", exception.toString()); return Mono.just(ShouldRetryResult .error(BridgeInternal.createServiceUnavailableException(exception))); } if (this.request != null) { logger.warn("Received invalid collection exception, will retry, {}", exception.toString()); this.request.forceNameCacheRefresh = true; } else { logger.error("Received unexpected invalid collection exception, request should be non-null.", exception); return Mono.just(ShouldRetryResult .error(BridgeInternal.createCosmosException(HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, exception))); } forceRefreshAddressCache = false; } else if (exception instanceof PartitionKeyRangeIsSplittingException) { this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedLSN = -1; this.request.requestContext.quorumSelectedStoreResponse = null; logger.info("Received partition key range splitting exception, will retry, {}", exception.toString()); this.request.forcePartitionKeyRangeRefresh = true; forceRefreshAddressCache = false; } else 
{ logger.warn("Received retrywith exception, will retry, {}", exception); forceRefreshAddressCache = false; } return Mono.just(ShouldRetryResult.retryAfter(backoffTime, Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount))); }
class GoneAndRetryWithRetryPolicy extends RetryPolicyWithDiagnostics { private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class); private final static int LIMITED_STACK_TRACE_FAILURE_DEPTH = 3; private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30; private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15; private final static int INITIAL_BACKOFF_TIME = 1; private final static int BACK_OFF_MULTIPLIER = 2; private final RxDocumentServiceRequest request; private volatile int attemptCount = 1; private volatile int attemptCountInvalidPartition = 1; private volatile int currentBackoffSeconds = GoneAndRetryWithRetryPolicy.INITIAL_BACKOFF_TIME; private volatile RetryWithException lastRetryWithException; private final StopWatch durationTimer = new StopWatch(); private final int waitTimeInSeconds; public final static Quadruple<Boolean, Boolean, Duration, Integer> INITIAL_ARGUMENT_VALUE_POLICY_ARG = Quadruple.with(false, false, Duration.ofSeconds(60), 0); public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) { this.request = request; startStopWatch(this.durationTimer); if (waitTimeInSeconds != null) { this.waitTimeInSeconds = waitTimeInSeconds; } else { this.waitTimeInSeconds = DEFAULT_WAIT_TIME_IN_SECONDS; } } @Override private void stopStopWatch(StopWatch stopwatch) { synchronized (stopwatch) { stopwatch.stop(); } } private void startStopWatch(StopWatch stopwatch) { synchronized (stopwatch) { stopwatch.start(); } } }
class GoneAndRetryWithRetryPolicy extends RetryPolicyWithDiagnostics { private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class); private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30; private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15; private final static int INITIAL_BACKOFF_TIME = 1; private final static int BACK_OFF_MULTIPLIER = 2; private final RxDocumentServiceRequest request; private volatile int attemptCount = 1; private volatile int attemptCountInvalidPartition = 1; private volatile int currentBackoffSeconds = GoneAndRetryWithRetryPolicy.INITIAL_BACKOFF_TIME; private volatile RetryWithException lastRetryWithException; private final StopWatch durationTimer = new StopWatch(); private final int waitTimeInSeconds; public final static Quadruple<Boolean, Boolean, Duration, Integer> INITIAL_ARGUMENT_VALUE_POLICY_ARG = Quadruple.with(false, false, Duration.ofSeconds(60), 0); public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) { this.request = request; startStopWatch(this.durationTimer); if (waitTimeInSeconds != null) { this.waitTimeInSeconds = waitTimeInSeconds; } else { this.waitTimeInSeconds = DEFAULT_WAIT_TIME_IN_SECONDS; } } @Override private void stopStopWatch(StopWatch stopwatch) { synchronized (stopwatch) { stopwatch.stop(); } } private void startStopWatch(StopWatch stopwatch) { synchronized (stopwatch) { stopwatch.start(); } } }
after all the retries are exhausted we want to print a warn message indicating failure. however when there are more retry attempt are left we want to print a info message.
public Mono<ShouldRetryResult> shouldRetry(Exception exception) { CosmosException exceptionToThrow = null; Duration backoffTime = Duration.ofSeconds(0); Duration timeout = Duration.ofSeconds(0); boolean forceRefreshAddressCache = false; if (!(exception instanceof GoneException) && !(exception instanceof RetryWithException) && !(exception instanceof PartitionIsMigratingException) && !(exception instanceof InvalidPartitionException && (this.request.getPartitionKeyRangeIdentity() == null || this.request.getPartitionKeyRangeIdentity().getCollectionRid() == null)) && !(exception instanceof PartitionKeyRangeIsSplittingException)) { logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount, exception); stopStopWatch(this.durationTimer); return Mono.just(ShouldRetryResult.noRetry()); } else if (exception instanceof RetryWithException) { this.lastRetryWithException = (RetryWithException) exception; } long remainingSeconds = this.waitTimeInSeconds - this.durationTimer.getTime() / 1000; int currentRetryAttemptCount = this.attemptCount; if (this.attemptCount++ > 1) { if (remainingSeconds <= 0) { if (exception instanceof GoneException) { if (this.lastRetryWithException != null) { logger.warn( "Received gone exception after backoff/retry including at least one RetryWithException. " + "Will fail the request with RetryWithException. GoneException: {}. RetryWithException: {}", exception, this.lastRetryWithException); exceptionToThrow = this.lastRetryWithException; } else { logger.warn("Received gone exception after backoff/retry. Will fail the request. 
{}, short stackTrace = [{}]", exception.toString(), Utils.limitedStackTrace(exception, LIMITED_STACK_TRACE_FAILURE_DEPTH)); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else if (exception instanceof PartitionKeyRangeGoneException) { if (this.lastRetryWithException != null) { logger.warn( "Received partition key range gone exception after backoff/retry including at least one RetryWithException." + "Will fail the request with RetryWithException. GoneException: {}. RetryWithException: {}", exception, this.lastRetryWithException); exceptionToThrow = this.lastRetryWithException; } else { logger.warn( "Received partition key range gone exception after backoff/retry. Will fail the request. {}", exception.toString()); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else if (exception instanceof InvalidPartitionException) { if (this.lastRetryWithException != null) { logger.warn( "Received InvalidPartitionException after backoff/retry including at least one RetryWithException. " + "Will fail the request with RetryWithException. InvalidPartitionException: {}. RetryWithException: {}", exception, this.lastRetryWithException); } else { logger.warn( "Received invalid collection partition exception after backoff/retry. Will fail the request. {}", exception.toString()); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else { logger.warn("Received retrywith exception after backoff/retry. Will fail the request. 
{}", exception.toString()); } stopStopWatch(this.durationTimer); return Mono.just(ShouldRetryResult.error(exceptionToThrow)); } backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds, remainingSeconds), GoneAndRetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS)); this.currentBackoffSeconds *= GoneAndRetryWithRetryPolicy.BACK_OFF_MULTIPLIER; logger.info("BackoffTime: {} seconds.", backoffTime.getSeconds()); } long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis(); timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec) : Duration.ofSeconds(GoneAndRetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS); if (exception instanceof GoneException) { logger.info("Received gone exception, will retry, {}", exception.toString()); forceRefreshAddressCache = true; } else if (exception instanceof PartitionIsMigratingException) { logger.warn("Received PartitionIsMigratingException, will retry, {}", exception.toString()); this.request.forceCollectionRoutingMapRefresh = true; forceRefreshAddressCache = true; } else if (exception instanceof InvalidPartitionException) { this.request.requestContext.quorumSelectedLSN = -1; this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedStoreResponse = null; this.request.requestContext.globalCommittedSelectedLSN = -1; if (this.attemptCountInvalidPartition++ > 2) { logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. 
{}", exception.toString()); return Mono.just(ShouldRetryResult .error(BridgeInternal.createServiceUnavailableException(exception))); } if (this.request != null) { logger.warn("Received invalid collection exception, will retry, {}", exception.toString()); this.request.forceNameCacheRefresh = true; } else { logger.error("Received unexpected invalid collection exception, request should be non-null.", exception); return Mono.just(ShouldRetryResult .error(BridgeInternal.createCosmosException(HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, exception))); } forceRefreshAddressCache = false; } else if (exception instanceof PartitionKeyRangeIsSplittingException) { this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedLSN = -1; this.request.requestContext.quorumSelectedStoreResponse = null; logger.info("Received partition key range splitting exception, will retry, {}", exception.toString()); this.request.forcePartitionKeyRangeRefresh = true; forceRefreshAddressCache = false; } else { logger.warn("Received retrywith exception, will retry, {}", exception); forceRefreshAddressCache = false; } return Mono.just(ShouldRetryResult.retryAfter(backoffTime, Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount))); }
logger.warn("Received gone exception after backoff/retry. Will fail the request. {}, short stackTrace = [{}]",
public Mono<ShouldRetryResult> shouldRetry(Exception exception) { CosmosException exceptionToThrow = null; Duration backoffTime = Duration.ofSeconds(0); Duration timeout = Duration.ofSeconds(0); boolean forceRefreshAddressCache = false; if (!(exception instanceof GoneException) && !(exception instanceof RetryWithException) && !(exception instanceof PartitionIsMigratingException) && !(exception instanceof InvalidPartitionException && (this.request.getPartitionKeyRangeIdentity() == null || this.request.getPartitionKeyRangeIdentity().getCollectionRid() == null)) && !(exception instanceof PartitionKeyRangeIsSplittingException)) { logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount, exception); stopStopWatch(this.durationTimer); return Mono.just(ShouldRetryResult.noRetry()); } else if (exception instanceof RetryWithException) { this.lastRetryWithException = (RetryWithException) exception; } long remainingSeconds = this.waitTimeInSeconds - this.durationTimer.getTime() / 1000; int currentRetryAttemptCount = this.attemptCount; if (this.attemptCount++ > 1) { if (remainingSeconds <= 0) { if (exception instanceof GoneException) { if (this.lastRetryWithException != null) { logger.warn( "Received gone exception after backoff/retry including at least one RetryWithException. " + "Will fail the request with RetryWithException. GoneException: {}. RetryWithException: {}", exception, this.lastRetryWithException); exceptionToThrow = this.lastRetryWithException; } else { logger.warn("Received gone exception after backoff/retry. Will fail the request. {}", exception.toString()); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else if (exception instanceof PartitionKeyRangeGoneException) { if (this.lastRetryWithException != null) { logger.warn( "Received partition key range gone exception after backoff/retry including at least one RetryWithException." + "Will fail the request with RetryWithException. 
GoneException: {}. RetryWithException: {}", exception, this.lastRetryWithException); exceptionToThrow = this.lastRetryWithException; } else { logger.warn( "Received partition key range gone exception after backoff/retry. Will fail the request. {}", exception.toString()); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else if (exception instanceof InvalidPartitionException) { if (this.lastRetryWithException != null) { logger.warn( "Received InvalidPartitionException after backoff/retry including at least one RetryWithException. " + "Will fail the request with RetryWithException. InvalidPartitionException: {}. RetryWithException: {}", exception, this.lastRetryWithException); } else { logger.warn( "Received invalid collection partition exception after backoff/retry. Will fail the request. {}", exception.toString()); exceptionToThrow = BridgeInternal.createServiceUnavailableException(exception); } } else { logger.warn("Received retrywith exception after backoff/retry. Will fail the request. {}", exception.toString()); } stopStopWatch(this.durationTimer); return Mono.just(ShouldRetryResult.error(exceptionToThrow)); } backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds, remainingSeconds), GoneAndRetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS)); this.currentBackoffSeconds *= GoneAndRetryWithRetryPolicy.BACK_OFF_MULTIPLIER; logger.info("BackoffTime: {} seconds.", backoffTime.getSeconds()); } long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis(); timeout = timeoutInMillSec > 0 ? 
Duration.ofMillis(timeoutInMillSec) : Duration.ofSeconds(GoneAndRetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS); if (exception instanceof GoneException) { logger.info("Received gone exception, will retry, {}", exception.toString()); forceRefreshAddressCache = true; } else if (exception instanceof PartitionIsMigratingException) { logger.warn("Received PartitionIsMigratingException, will retry, {}", exception.toString()); this.request.forceCollectionRoutingMapRefresh = true; forceRefreshAddressCache = true; } else if (exception instanceof InvalidPartitionException) { this.request.requestContext.quorumSelectedLSN = -1; this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedStoreResponse = null; this.request.requestContext.globalCommittedSelectedLSN = -1; if (this.attemptCountInvalidPartition++ > 2) { logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. {}", exception.toString()); return Mono.just(ShouldRetryResult .error(BridgeInternal.createServiceUnavailableException(exception))); } if (this.request != null) { logger.warn("Received invalid collection exception, will retry, {}", exception.toString()); this.request.forceNameCacheRefresh = true; } else { logger.error("Received unexpected invalid collection exception, request should be non-null.", exception); return Mono.just(ShouldRetryResult .error(BridgeInternal.createCosmosException(HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, exception))); } forceRefreshAddressCache = false; } else if (exception instanceof PartitionKeyRangeIsSplittingException) { this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedLSN = -1; this.request.requestContext.quorumSelectedStoreResponse = null; logger.info("Received partition key range splitting exception, will retry, {}", exception.toString()); this.request.forcePartitionKeyRangeRefresh = true; forceRefreshAddressCache = false; } else 
{ logger.warn("Received retrywith exception, will retry, {}", exception); forceRefreshAddressCache = false; } return Mono.just(ShouldRetryResult.retryAfter(backoffTime, Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount))); }
class GoneAndRetryWithRetryPolicy extends RetryPolicyWithDiagnostics { private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class); private final static int LIMITED_STACK_TRACE_FAILURE_DEPTH = 3; private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30; private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15; private final static int INITIAL_BACKOFF_TIME = 1; private final static int BACK_OFF_MULTIPLIER = 2; private final RxDocumentServiceRequest request; private volatile int attemptCount = 1; private volatile int attemptCountInvalidPartition = 1; private volatile int currentBackoffSeconds = GoneAndRetryWithRetryPolicy.INITIAL_BACKOFF_TIME; private volatile RetryWithException lastRetryWithException; private final StopWatch durationTimer = new StopWatch(); private final int waitTimeInSeconds; public final static Quadruple<Boolean, Boolean, Duration, Integer> INITIAL_ARGUMENT_VALUE_POLICY_ARG = Quadruple.with(false, false, Duration.ofSeconds(60), 0); public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) { this.request = request; startStopWatch(this.durationTimer); if (waitTimeInSeconds != null) { this.waitTimeInSeconds = waitTimeInSeconds; } else { this.waitTimeInSeconds = DEFAULT_WAIT_TIME_IN_SECONDS; } } @Override private void stopStopWatch(StopWatch stopwatch) { synchronized (stopwatch) { stopwatch.stop(); } } private void startStopWatch(StopWatch stopwatch) { synchronized (stopwatch) { stopwatch.start(); } } }
class GoneAndRetryWithRetryPolicy extends RetryPolicyWithDiagnostics { private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class); private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30; private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15; private final static int INITIAL_BACKOFF_TIME = 1; private final static int BACK_OFF_MULTIPLIER = 2; private final RxDocumentServiceRequest request; private volatile int attemptCount = 1; private volatile int attemptCountInvalidPartition = 1; private volatile int currentBackoffSeconds = GoneAndRetryWithRetryPolicy.INITIAL_BACKOFF_TIME; private volatile RetryWithException lastRetryWithException; private final StopWatch durationTimer = new StopWatch(); private final int waitTimeInSeconds; public final static Quadruple<Boolean, Boolean, Duration, Integer> INITIAL_ARGUMENT_VALUE_POLICY_ARG = Quadruple.with(false, false, Duration.ofSeconds(60), 0); public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) { this.request = request; startStopWatch(this.durationTimer); if (waitTimeInSeconds != null) { this.waitTimeInSeconds = waitTimeInSeconds; } else { this.waitTimeInSeconds = DEFAULT_WAIT_TIME_IN_SECONDS; } } @Override private void stopStopWatch(StopWatch stopwatch) { synchronized (stopwatch) { stopwatch.stop(); } } private void startStopWatch(StopWatch stopwatch) { synchronized (stopwatch) { stopwatch.start(); } } }
`super.beforeTest()` is an empty method, you can remove this.
protected void beforeTest() { super.beforeTest(); String connectionString = interceptorManager.isPlaybackMode() ? "DefaultEndpointsProtocol=https;AccountName=dummyAccount;AccountKey=xyzDummy;EndpointSuffix=core.windows.net" : System.getenv("AZURE_TABLES_CONNECTION_STRING"); StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, new ClientLogger(AzureTableImplTest.class)); StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); TablesSharedKeyCredential sharedKeyCredential = new TablesSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey()); final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new AddDatePolicy()); policies.add(new AddHeadersPolicy(new HttpHeaders().put("Accept", OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA.toString()))); policies.add(new TablesSharedKeyCredentialPolicy(sharedKeyCredential)); policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))); final HttpClient httpClientToUse; if (interceptorManager.isPlaybackMode()) { httpClientToUse = interceptorManager.getPlaybackClient(); } else { httpClientToUse = HttpClient.createDefault(); policies.add(interceptorManager.getRecordPolicy()); policies.add(new RetryPolicy()); } final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(httpClientToUse) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); azureTable = new AzureTableImplBuilder() .pipeline(pipeline) .version("2019-02-02") .url(storageConnectionString.getTableEndpoint().getPrimaryUri()) .buildClient(); }
super.beforeTest();
protected void beforeTest() { String connectionString = interceptorManager.isPlaybackMode() ? "DefaultEndpointsProtocol=https;AccountName=dummyAccount;AccountKey=xyzDummy;EndpointSuffix=core.windows.net" : System.getenv("AZURE_TABLES_CONNECTION_STRING"); StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, new ClientLogger(AzureTableImplTest.class)); Assertions.assertNotNull(connectionString, "Cannot continue test if connectionString is not set."); StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); TablesSharedKeyCredential sharedKeyCredential = new TablesSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey()); List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new AddDatePolicy()); policies.add(new AddHeadersPolicy(new HttpHeaders().put("Accept", OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA.toString()))); policies.add(new TablesSharedKeyCredentialPolicy(sharedKeyCredential)); policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))); HttpClient httpClientToUse; if (interceptorManager.isPlaybackMode()) { httpClientToUse = interceptorManager.getPlaybackClient(); } else { httpClientToUse = HttpClient.createDefault(); policies.add(interceptorManager.getRecordPolicy()); policies.add(new RetryPolicy()); } HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(httpClientToUse) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); azureTable = new AzureTableImplBuilder() .pipeline(pipeline) .version("2019-02-02") .url(storageConnectionString.getTableEndpoint().getPrimaryUri()) .buildClient(); }
class AzureTableImplTest extends TestBase { private static final String PARTITION_KEY = "PartitionKey"; private static final String ROW_KEY = "RowKey"; private static final int TIMEOUT = 5000; private AzureTableImpl azureTable; @Override @Override protected void afterTest() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); Mono.when(azureTable.getTables().queryWithResponseAsync(testResourceNamer.randomUuid(), null, queryOptions, Context.NONE).flatMapMany(tablesQueryResponse -> { return Flux.fromIterable(tablesQueryResponse.getValue().getValue()).flatMap(tableResponseProperty -> { return azureTable.getTables().deleteWithResponseAsync(tableResponseProperty.getTableName(), testResourceNamer.randomUuid(), Context.NONE); }); })).block(); } void createTable(String tableName) { TableProperties tableProperties = new TableProperties().setTableName(tableName); String requestId = testResourceNamer.randomUuid(); azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE).block(); } void insertNoETag(String tableName, Map<String, Object> properties) { String requestId = testResourceNamer.randomUuid(); azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE).log().block(); } @Test void createTable() { String tableName = testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void createTableDuplicateName() { String tableName = 
testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); createTable(tableName); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void deleteTable() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentTable() { String tableName = testResourceNamer.randomName("test", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryTable() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(response.getValue().getValue().get(0).getTableName(), tableA); Assertions.assertEquals(response.getValue().getValue().get(1).getTableName(), 
tableB); }) .expectComplete() .verify(); } @Test void queryTablewithTop() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; int expectedSize = 1; String requestId = testResourceNamer.randomUuid(); queryOptions.setTop(1); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(expectedSize, response.getValue().getValue().size()); Assertions.assertEquals(tableA, response.getValue().getValue().get(0).getTableName()); }) .expectComplete() .verify(); } @Test void insertNoEtag() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, 
partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void updateEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> 
{ Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void updateNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void deleteEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", TIMEOUT, requestId, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", 
TIMEOUT, requestId, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryEntity() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, testResourceNamer.randomName("rowKeyB", 20)); insertNoETag(tableName, entityB); int expectedStatusCode = 200; StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(partitionKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(partitionKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithSelect() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); String rowKeyEntityA = testResourceNamer.randomName("rowKeyA", 20); 
entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, rowKeyEntityA); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); String rowKeyEntityB = testResourceNamer.randomName("rowKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, rowKeyEntityB); insertNoETag(tableName, entityB); int expectedStatusCode = 200; queryOptions.setSelect(ROW_KEY); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(rowKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(rowKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithFilter() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); int expectedStatusCode = 200; queryOptions.setSelect(PARTITION_KEY + "eq" + partitionKeyEntityA); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntityWithTop() { String requestId = testResourceNamer.randomUuid(); 
QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 200; queryOptions.setTop(0); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntitiesWithPartitionAndRowKey() { QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().queryEntitiesWithPartitionAndRowKeyWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, testResourceNamer.randomUuid(), queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(200, response.getStatusCode()); }) .expectComplete() .verify(); } }
class AzureTableImplTest extends TestBase { private static final String PARTITION_KEY = "PartitionKey"; private static final String ROW_KEY = "RowKey"; private static final int TIMEOUT = 5000; private AzureTableImpl azureTable; @Override @Override protected void afterTest() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); Mono.when(azureTable.getTables().queryWithResponseAsync(testResourceNamer.randomUuid(), null, queryOptions, Context.NONE).flatMapMany(tablesQueryResponse -> { return Flux.fromIterable(tablesQueryResponse.getValue().getValue()).flatMap(tableResponseProperty -> { return azureTable.getTables().deleteWithResponseAsync(tableResponseProperty.getTableName(), testResourceNamer.randomUuid(), Context.NONE); }); })).block(); } void createTable(String tableName) { TableProperties tableProperties = new TableProperties().setTableName(tableName); String requestId = testResourceNamer.randomUuid(); azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE).block(); } void insertNoETag(String tableName, Map<String, Object> properties) { String requestId = testResourceNamer.randomUuid(); azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE).log().block(); } @Test void createTable() { String tableName = testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void createTableDuplicateName() { String tableName = 
testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); createTable(tableName); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .expectError(TableServiceErrorException.class) .verify(); } @Test void deleteTable() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentTable() { String tableName = testResourceNamer.randomName("test", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryTable() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertNotNull(response.getValue(), "Expected there to be a result."); List<TableResponseProperties> results = response.getValue().getValue(); Assertions.assertNotNull(results, "Expected there to be a 
set of items."); Assertions.assertEquals(2, results.size()); Assertions.assertEquals(response.getValue().getValue().get(0).getTableName(), tableA); Assertions.assertEquals(response.getValue().getValue().get(1).getTableName(), tableB); }) .expectComplete() .verify(); } @Test void queryTablewithTop() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; int expectedSize = 1; String requestId = testResourceNamer.randomUuid(); queryOptions.setTop(1); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(expectedSize, response.getValue().getValue().size()); Assertions.assertEquals(tableA, response.getValue().getValue().get(0).getTableName()); }) .expectComplete() .verify(); } @Test void insertNoEtag() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeEntity() { String tableName = testResourceNamer.randomName("test", 20); 
createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void updateEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", 
testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void updateNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void deleteEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", TIMEOUT, requestId, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); String partitionKeyValue = 
testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", TIMEOUT, requestId, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryEntity() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, testResourceNamer.randomName("rowKeyB", 20)); insertNoETag(tableName, entityB); int expectedStatusCode = 200; StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(partitionKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(partitionKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithSelect() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String 
tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); String rowKeyEntityA = testResourceNamer.randomName("rowKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, rowKeyEntityA); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); String rowKeyEntityB = testResourceNamer.randomName("rowKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, rowKeyEntityB); insertNoETag(tableName, entityB); int expectedStatusCode = 200; queryOptions.setSelect(ROW_KEY); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(rowKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(rowKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithFilter() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); int expectedStatusCode = 200; queryOptions.setSelect(PARTITION_KEY + "eq" + partitionKeyEntityA); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, 
TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntityWithTop() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 200; queryOptions.setTop(0); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntitiesWithPartitionAndRowKey() { QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().queryEntitiesWithPartitionAndRowKeyWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, testResourceNamer.randomUuid(), queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(200, response.getStatusCode()); }) .expectComplete() .verify(); } }
Remove `this.`
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String authorizationValue = this.credential.generateAuthorizationHeader(context.getHttpRequest().getUrl(), context.getHttpRequest().getHeaders().toMap()); context.getHttpRequest().setHeader("Authorization", authorizationValue); return next.process(); }
String authorizationValue = this.credential.generateAuthorizationHeader(context.getHttpRequest().getUrl(),
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String authorizationValue = credential.generateAuthorizationHeader(context.getHttpRequest().getUrl(), context.getHttpRequest().getHeaders().toMap()); context.getHttpRequest().setHeader("Authorization", authorizationValue); return next.process(); }
class TablesSharedKeyCredentialPolicy implements HttpPipelinePolicy { private final TablesSharedKeyCredential credential; /** * constructor for the TablesSharedKeyCredentialPolicy class * * @param credential the credentials of the account */ public TablesSharedKeyCredentialPolicy(TablesSharedKeyCredential credential) { this.credential = credential; } /** * creates an Http response * * @param context the context of the http pipeline * @param next the next Http pipeline policy * @return an Http response */ }
class TablesSharedKeyCredentialPolicy implements HttpPipelinePolicy { private final TablesSharedKeyCredential credential; /** * constructor for the TablesSharedKeyCredentialPolicy class * * @param credential the credentials of the account */ public TablesSharedKeyCredentialPolicy(TablesSharedKeyCredential credential) { this.credential = credential; } /** * creates an Http response * * @param context the context of the http pipeline * @param next the next Http pipeline policy * @return an Http response */ }
This can be simplified to. Same with the line below. ```java this.accountName = Objects.requireNonNull(accountName, "'accountName' cannot be null."); ```
public TablesSharedKeyCredential(String accountName, String accountKey) { Objects.requireNonNull(accountName, "'accountName' cannot be null."); Objects.requireNonNull(accountKey, "'accountKey' cannot be null."); this.accountName = accountName; this.accountKey = accountKey; }
Objects.requireNonNull(accountName, "'accountName' cannot be null.");
public TablesSharedKeyCredential(String accountName, String accountKey) { this.accountName = Objects.requireNonNull(accountName, "'accountName' cannot be null."); this.accountKey = Objects.requireNonNull(accountKey, "'accountKey' cannot be null."); }
class TablesSharedKeyCredential { private static final String AUTHORIZATION_HEADER_FORMAT = "SharedKeyLite %s:%s"; private final String accountName; private final String accountKey; /** * Constructor for TableSharedKeyCredential Class * * @param accountName name of the storage account * @param accountKey key to the storage account */ /** * Generates the Auth Headers * * @param requestURL the URL which the request is going to * @param headers the headers of the request * @return the auth header */ public String generateAuthorizationHeader(URL requestURL, Map<String, String> headers) { String signature = StorageImplUtils.computeHMac256(this.accountKey, this.buildStringToSign(requestURL, headers)); return String.format(AUTHORIZATION_HEADER_FORMAT, this.accountName, signature); } /** * creates the String to Sign * * @param requestURL the URL which the request is going to * @param headers the headers of the request * @return a string to sign for the request */ private String buildStringToSign(URL requestURL, Map<String, String> headers) { String dateHeader = headers.containsKey("x-ms-date") ? "" : this.getStandardHeaderValue(headers, "Date"); return String.join("\n", dateHeader, this.getCanonicalizedResource(requestURL)); } /** * gets necessary headers if the request does not already contain them * * @param headers a map of the headers which the request has * @param headerName the name of the header to get the standard header for * @return the standard header for the given name */ private String getStandardHeaderValue(Map<String, String> headers, String headerName) { String headerValue = headers.get(headerName); return headerValue == null ? 
"" : headerValue; } /** * returns the canonicalized resource needed for a request * * @param requestURL the url of the request * @return the string that is the canonicalized resource */ private String getCanonicalizedResource(URL requestURL) { StringBuilder canonicalizedResource = new StringBuilder("/"); canonicalizedResource.append(this.accountName); if (requestURL.getPath().length() > 0) { canonicalizedResource.append(requestURL.getPath()); } else { canonicalizedResource.append('/'); } if (requestURL.getQuery() != null) { Map<String, String[]> queryParams = StorageImplUtils.parseQueryStringSplitValues(requestURL.getQuery()); ArrayList<String> queryParamNames = new ArrayList<>(queryParams.keySet()); Collections.sort(queryParamNames); for (String queryParamName : queryParamNames) { String[] queryParamValues = queryParams.get(queryParamName); Arrays.sort(queryParamValues); String queryParamValuesStr = String.join(",", queryParamValues); if (queryParamName.equals("comp")) { canonicalizedResource.append("?").append(queryParamName.toLowerCase(Locale.ROOT)).append("=") .append(queryParamValuesStr); } } } return canonicalizedResource.toString(); } }
class TablesSharedKeyCredential { private static final String AUTHORIZATION_HEADER_FORMAT = "SharedKeyLite %s:%s"; private final String accountName; private final String accountKey; /** * Constructor for TableSharedKeyCredential Class * * @param accountName name of the storage account * @param accountKey key to the storage account */ /** * Generates the Auth Headers * * @param requestUrl the URL which the request is going to * @param headers the headers of the request * @return the auth header */ public String generateAuthorizationHeader(URL requestUrl, Map<String, String> headers) { String signature = StorageImplUtils.computeHMac256(accountKey, buildStringToSign(requestUrl, headers)); return String.format(AUTHORIZATION_HEADER_FORMAT, accountName, signature); } /** * creates the String to Sign * * @param requestUrl the Url which the request is going to * @param headers the headers of the request * @return a string to sign for the request */ private String buildStringToSign(URL requestUrl, Map<String, String> headers) { String dateHeader = headers.containsKey("x-ms-date") ? "" : this.getStandardHeaderValue(headers, "Date"); return String.join("\n", dateHeader, getCanonicalizedResource(requestUrl)); } /** * gets necessary headers if the request does not already contain them * * @param headers a map of the headers which the request has * @param headerName the name of the header to get the standard header for * @return the standard header for the given name */ private String getStandardHeaderValue(Map<String, String> headers, String headerName) { String headerValue = headers.get(headerName); return headerValue == null ? 
"" : headerValue; } /** * returns the canonicalized resource needed for a request * * @param requestUrl the url of the request * @return the string that is the canonicalized resource */ private String getCanonicalizedResource(URL requestUrl) { StringBuilder canonicalizedResource = new StringBuilder("/").append(accountName); if (requestUrl.getPath().length() > 0) { canonicalizedResource.append(requestUrl.getPath()); } else { canonicalizedResource.append('/'); } if (requestUrl.getQuery() != null) { Map<String, String[]> queryParams = StorageImplUtils.parseQueryStringSplitValues(requestUrl.getQuery()); ArrayList<String> queryParamNames = new ArrayList<>(queryParams.keySet()); Collections.sort(queryParamNames); for (String queryParamName : queryParamNames) { String[] queryParamValues = queryParams.get(queryParamName); Arrays.sort(queryParamValues); String queryParamValuesStr = String.join(",", queryParamValues); if (queryParamName.equalsIgnoreCase("comp")) { canonicalizedResource.append("?").append(queryParamName.toLowerCase(Locale.ROOT)).append("=") .append(queryParamValuesStr); } } } return canonicalizedResource.toString(); } }
same here ... not sure
public static void main(String[] args) throws IOException { FormRecognizerClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); List<RecognizedForm> formsWithLabeledModel = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(new FileInputStream(analyzeFile), analyzeFile.length(), "{labeled_model_Id}").setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(true).setPollInterval(Duration.ofSeconds(5))).getFinalResult(); List<RecognizedForm> formsWithUnlabeledModel = client.beginRecognizeCustomForms(new FileInputStream(analyzeFile), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF).getFinalResult(); System.out.println("--------Recognizing forms with labeled custom model--------"); formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "MerchantName".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); })); System.out.println("-----------------------------------------------------------"); 
System.out.println("-------Recognizing forms with unlabeled custom model-------"); formsWithUnlabeledModel.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); })); }
public static void main(String[] args) throws IOException { FormRecognizerClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); List<RecognizedForm> formsWithLabeledModel = client.beginRecognizeCustomForms( new RecognizeCustomFormsOptions(new FileInputStream(analyzeFile), analyzeFile.length(), "{labeled_model_Id}").setFormContentType(FormContentType.APPLICATION_PDF) .setIncludeTextContent(true).setPollInterval(Duration.ofSeconds(5))).getFinalResult(); List<RecognizedForm> formsWithUnlabeledModel = client.beginRecognizeCustomForms(new FileInputStream(analyzeFile), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF).getFinalResult(); System.out.println("--------Recognizing forms with labeled custom model--------"); formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "MerchantName".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); })); System.out.println("-----------------------------------------------------------"); 
System.out.println("-------Recognizing forms with unlabeled custom model-------"); formsWithUnlabeledModel.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); })); }
class AdvancedDiffLabeledUnlabeledData { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
class AdvancedDiffLabeledUnlabeledData { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
I'd be consistent about using `final`. If you use final here. I'd expect you to use it in all other cases in your code where you don't expect variables to be reassigned.
protected void beforeTest() { super.beforeTest(); String connectionString = interceptorManager.isPlaybackMode() ? "DefaultEndpointsProtocol=https;AccountName=dummyAccount;AccountKey=xyzDummy;EndpointSuffix=core.windows.net" : System.getenv("AZURE_TABLES_CONNECTION_STRING"); StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, new ClientLogger(AzureTableImplTest.class)); StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); TablesSharedKeyCredential sharedKeyCredential = new TablesSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey()); final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new AddDatePolicy()); policies.add(new AddHeadersPolicy(new HttpHeaders().put("Accept", OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA.toString()))); policies.add(new TablesSharedKeyCredentialPolicy(sharedKeyCredential)); policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))); final HttpClient httpClientToUse; if (interceptorManager.isPlaybackMode()) { httpClientToUse = interceptorManager.getPlaybackClient(); } else { httpClientToUse = HttpClient.createDefault(); policies.add(interceptorManager.getRecordPolicy()); policies.add(new RetryPolicy()); } final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(httpClientToUse) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); azureTable = new AzureTableImplBuilder() .pipeline(pipeline) .version("2019-02-02") .url(storageConnectionString.getTableEndpoint().getPrimaryUri()) .buildClient(); }
final List<HttpPipelinePolicy> policies = new ArrayList<>();
protected void beforeTest() { String connectionString = interceptorManager.isPlaybackMode() ? "DefaultEndpointsProtocol=https;AccountName=dummyAccount;AccountKey=xyzDummy;EndpointSuffix=core.windows.net" : System.getenv("AZURE_TABLES_CONNECTION_STRING"); StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, new ClientLogger(AzureTableImplTest.class)); Assertions.assertNotNull(connectionString, "Cannot continue test if connectionString is not set."); StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); TablesSharedKeyCredential sharedKeyCredential = new TablesSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey()); List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new AddDatePolicy()); policies.add(new AddHeadersPolicy(new HttpHeaders().put("Accept", OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA.toString()))); policies.add(new TablesSharedKeyCredentialPolicy(sharedKeyCredential)); policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))); HttpClient httpClientToUse; if (interceptorManager.isPlaybackMode()) { httpClientToUse = interceptorManager.getPlaybackClient(); } else { httpClientToUse = HttpClient.createDefault(); policies.add(interceptorManager.getRecordPolicy()); policies.add(new RetryPolicy()); } HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(httpClientToUse) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); azureTable = new AzureTableImplBuilder() .pipeline(pipeline) .version("2019-02-02") .url(storageConnectionString.getTableEndpoint().getPrimaryUri()) .buildClient(); }
class AzureTableImplTest extends TestBase { private static final String PARTITION_KEY = "PartitionKey"; private static final String ROW_KEY = "RowKey"; private static final int TIMEOUT = 5000; private AzureTableImpl azureTable; @Override @Override protected void afterTest() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); Mono.when(azureTable.getTables().queryWithResponseAsync(testResourceNamer.randomUuid(), null, queryOptions, Context.NONE).flatMapMany(tablesQueryResponse -> { return Flux.fromIterable(tablesQueryResponse.getValue().getValue()).flatMap(tableResponseProperty -> { return azureTable.getTables().deleteWithResponseAsync(tableResponseProperty.getTableName(), testResourceNamer.randomUuid(), Context.NONE); }); })).block(); } void createTable(String tableName) { TableProperties tableProperties = new TableProperties().setTableName(tableName); String requestId = testResourceNamer.randomUuid(); azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE).block(); } void insertNoETag(String tableName, Map<String, Object> properties) { String requestId = testResourceNamer.randomUuid(); azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE).log().block(); } @Test void createTable() { String tableName = testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void createTableDuplicateName() { String tableName = 
testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); createTable(tableName); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void deleteTable() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentTable() { String tableName = testResourceNamer.randomName("test", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryTable() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(response.getValue().getValue().get(0).getTableName(), tableA); Assertions.assertEquals(response.getValue().getValue().get(1).getTableName(), 
tableB); }) .expectComplete() .verify(); } @Test void queryTablewithTop() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; int expectedSize = 1; String requestId = testResourceNamer.randomUuid(); queryOptions.setTop(1); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(expectedSize, response.getValue().getValue().size()); Assertions.assertEquals(tableA, response.getValue().getValue().get(0).getTableName()); }) .expectComplete() .verify(); } @Test void insertNoEtag() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, 
partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void updateEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> 
{ Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void updateNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void deleteEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", TIMEOUT, requestId, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", 
TIMEOUT, requestId, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryEntity() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, testResourceNamer.randomName("rowKeyB", 20)); insertNoETag(tableName, entityB); int expectedStatusCode = 200; StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(partitionKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(partitionKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithSelect() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); String rowKeyEntityA = testResourceNamer.randomName("rowKeyA", 20); 
entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, rowKeyEntityA); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); String rowKeyEntityB = testResourceNamer.randomName("rowKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, rowKeyEntityB); insertNoETag(tableName, entityB); int expectedStatusCode = 200; queryOptions.setSelect(ROW_KEY); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(rowKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(rowKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithFilter() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); int expectedStatusCode = 200; queryOptions.setSelect(PARTITION_KEY + "eq" + partitionKeyEntityA); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntityWithTop() { String requestId = testResourceNamer.randomUuid(); 
QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 200; queryOptions.setTop(0); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntitiesWithPartitionAndRowKey() { QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().queryEntitiesWithPartitionAndRowKeyWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, testResourceNamer.randomUuid(), queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(200, response.getStatusCode()); }) .expectComplete() .verify(); } }
class AzureTableImplTest extends TestBase { private static final String PARTITION_KEY = "PartitionKey"; private static final String ROW_KEY = "RowKey"; private static final int TIMEOUT = 5000; private AzureTableImpl azureTable; @Override @Override protected void afterTest() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); Mono.when(azureTable.getTables().queryWithResponseAsync(testResourceNamer.randomUuid(), null, queryOptions, Context.NONE).flatMapMany(tablesQueryResponse -> { return Flux.fromIterable(tablesQueryResponse.getValue().getValue()).flatMap(tableResponseProperty -> { return azureTable.getTables().deleteWithResponseAsync(tableResponseProperty.getTableName(), testResourceNamer.randomUuid(), Context.NONE); }); })).block(); } void createTable(String tableName) { TableProperties tableProperties = new TableProperties().setTableName(tableName); String requestId = testResourceNamer.randomUuid(); azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE).block(); } void insertNoETag(String tableName, Map<String, Object> properties) { String requestId = testResourceNamer.randomUuid(); azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE).log().block(); } @Test void createTable() { String tableName = testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void createTableDuplicateName() { String tableName = 
testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); createTable(tableName); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .expectError(TableServiceErrorException.class) .verify(); } @Test void deleteTable() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentTable() { String tableName = testResourceNamer.randomName("test", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryTable() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertNotNull(response.getValue(), "Expected there to be a result."); List<TableResponseProperties> results = response.getValue().getValue(); Assertions.assertNotNull(results, "Expected there to be a 
set of items."); Assertions.assertEquals(2, results.size()); Assertions.assertEquals(response.getValue().getValue().get(0).getTableName(), tableA); Assertions.assertEquals(response.getValue().getValue().get(1).getTableName(), tableB); }) .expectComplete() .verify(); } @Test void queryTablewithTop() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; int expectedSize = 1; String requestId = testResourceNamer.randomUuid(); queryOptions.setTop(1); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(expectedSize, response.getValue().getValue().size()); Assertions.assertEquals(tableA, response.getValue().getValue().get(0).getTableName()); }) .expectComplete() .verify(); } @Test void insertNoEtag() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeEntity() { String tableName = testResourceNamer.randomName("test", 20); 
createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void updateEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", 
testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void updateNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void deleteEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", TIMEOUT, requestId, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); String partitionKeyValue = 
testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", TIMEOUT, requestId, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryEntity() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, testResourceNamer.randomName("rowKeyB", 20)); insertNoETag(tableName, entityB); int expectedStatusCode = 200; StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(partitionKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(partitionKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithSelect() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String 
tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); String rowKeyEntityA = testResourceNamer.randomName("rowKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, rowKeyEntityA); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); String rowKeyEntityB = testResourceNamer.randomName("rowKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, rowKeyEntityB); insertNoETag(tableName, entityB); int expectedStatusCode = 200; queryOptions.setSelect(ROW_KEY); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(rowKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(rowKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithFilter() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); int expectedStatusCode = 200; queryOptions.setSelect(PARTITION_KEY + "eq" + partitionKeyEntityA); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, 
TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntityWithTop() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 200; queryOptions.setTop(0); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntitiesWithPartitionAndRowKey() { QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().queryEntitiesWithPartitionAndRowKeyWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, testResourceNamer.randomUuid(), queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(200, response.getStatusCode()); }) .expectComplete() .verify(); } }
If you import this, it won't need to be fully qualified.
void createTableDuplicateName() { String tableName = testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); createTable(tableName); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); }
.expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class)
void createTableDuplicateName() { String tableName = testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); createTable(tableName); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .expectError(TableServiceErrorException.class) .verify(); }
class AzureTableImplTest extends TestBase { private static final String PARTITION_KEY = "PartitionKey"; private static final String ROW_KEY = "RowKey"; private static final int TIMEOUT = 5000; private AzureTableImpl azureTable; @Override protected void beforeTest() { String connectionString = interceptorManager.isPlaybackMode() ? "DefaultEndpointsProtocol=https;AccountName=dummyAccount;AccountKey=xyzDummy;EndpointSuffix=core.windows.net" : System.getenv("AZURE_TABLES_CONNECTION_STRING"); StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, new ClientLogger(AzureTableImplTest.class)); StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); TablesSharedKeyCredential sharedKeyCredential = new TablesSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey()); List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new AddDatePolicy()); policies.add(new AddHeadersPolicy(new HttpHeaders().put("Accept", OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA.toString()))); policies.add(new TablesSharedKeyCredentialPolicy(sharedKeyCredential)); policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))); HttpClient httpClientToUse; if (interceptorManager.isPlaybackMode()) { httpClientToUse = interceptorManager.getPlaybackClient(); } else { httpClientToUse = HttpClient.createDefault(); policies.add(interceptorManager.getRecordPolicy()); policies.add(new RetryPolicy()); } HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(httpClientToUse) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); azureTable = new AzureTableImplBuilder() .pipeline(pipeline) .version("2019-02-02") .url(storageConnectionString.getTableEndpoint().getPrimaryUri()) .buildClient(); } @Override protected void afterTest() { QueryOptions queryOptions = new QueryOptions() 
.setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); Mono.when(azureTable.getTables().queryWithResponseAsync(testResourceNamer.randomUuid(), null, queryOptions, Context.NONE).flatMapMany(tablesQueryResponse -> { return Flux.fromIterable(tablesQueryResponse.getValue().getValue()).flatMap(tableResponseProperty -> { return azureTable.getTables().deleteWithResponseAsync(tableResponseProperty.getTableName(), testResourceNamer.randomUuid(), Context.NONE); }); })).block(); } void createTable(String tableName) { TableProperties tableProperties = new TableProperties().setTableName(tableName); String requestId = testResourceNamer.randomUuid(); azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE).block(); } void insertNoETag(String tableName, Map<String, Object> properties) { String requestId = testResourceNamer.randomUuid(); azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE).log().block(); } @Test void createTable() { String tableName = testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test @Test void deleteTable() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, 
response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentTable() { String tableName = testResourceNamer.randomName("test", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryTable() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(response.getValue().getValue().get(0).getTableName(), tableA); Assertions.assertEquals(response.getValue().getValue().get(1).getTableName(), tableB); }) .expectComplete() .verify(); } @Test void queryTablewithTop() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; int expectedSize = 1; String requestId = testResourceNamer.randomUuid(); queryOptions.setTop(1); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(expectedSize, response.getValue().getValue().size()); Assertions.assertEquals(tableA, 
response.getValue().getValue().get(0).getTableName()); }) .expectComplete() .verify(); } @Test void insertNoEtag() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = 
testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void updateEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void updateNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void 
deleteEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", TIMEOUT, requestId, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", TIMEOUT, requestId, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryEntity() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String 
partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, testResourceNamer.randomName("rowKeyB", 20)); insertNoETag(tableName, entityB); int expectedStatusCode = 200; StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(partitionKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(partitionKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithSelect() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); String rowKeyEntityA = testResourceNamer.randomName("rowKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, rowKeyEntityA); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); String rowKeyEntityB = testResourceNamer.randomName("rowKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, rowKeyEntityB); insertNoETag(tableName, entityB); int expectedStatusCode = 200; queryOptions.setSelect(ROW_KEY); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, 
response.getValue().getValue().get(0).containsValue(rowKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(rowKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithFilter() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); int expectedStatusCode = 200; queryOptions.setSelect(PARTITION_KEY + "eq" + partitionKeyEntityA); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntityWithTop() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 200; queryOptions.setTop(0); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntitiesWithPartitionAndRowKey() { QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); 
createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().queryEntitiesWithPartitionAndRowKeyWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, testResourceNamer.randomUuid(), queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(200, response.getStatusCode()); }) .expectComplete() .verify(); } }
class AzureTableImplTest extends TestBase { private static final String PARTITION_KEY = "PartitionKey"; private static final String ROW_KEY = "RowKey"; private static final int TIMEOUT = 5000; private AzureTableImpl azureTable; @Override protected void beforeTest() { String connectionString = interceptorManager.isPlaybackMode() ? "DefaultEndpointsProtocol=https;AccountName=dummyAccount;AccountKey=xyzDummy;EndpointSuffix=core.windows.net" : System.getenv("AZURE_TABLES_CONNECTION_STRING"); StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, new ClientLogger(AzureTableImplTest.class)); Assertions.assertNotNull(connectionString, "Cannot continue test if connectionString is not set."); StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); TablesSharedKeyCredential sharedKeyCredential = new TablesSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey()); List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new AddDatePolicy()); policies.add(new AddHeadersPolicy(new HttpHeaders().put("Accept", OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA.toString()))); policies.add(new TablesSharedKeyCredentialPolicy(sharedKeyCredential)); policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))); HttpClient httpClientToUse; if (interceptorManager.isPlaybackMode()) { httpClientToUse = interceptorManager.getPlaybackClient(); } else { httpClientToUse = HttpClient.createDefault(); policies.add(interceptorManager.getRecordPolicy()); policies.add(new RetryPolicy()); } HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(httpClientToUse) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); azureTable = new AzureTableImplBuilder() .pipeline(pipeline) .version("2019-02-02") .url(storageConnectionString.getTableEndpoint().getPrimaryUri()) .buildClient(); } @Override 
protected void afterTest() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); Mono.when(azureTable.getTables().queryWithResponseAsync(testResourceNamer.randomUuid(), null, queryOptions, Context.NONE).flatMapMany(tablesQueryResponse -> { return Flux.fromIterable(tablesQueryResponse.getValue().getValue()).flatMap(tableResponseProperty -> { return azureTable.getTables().deleteWithResponseAsync(tableResponseProperty.getTableName(), testResourceNamer.randomUuid(), Context.NONE); }); })).block(); } void createTable(String tableName) { TableProperties tableProperties = new TableProperties().setTableName(tableName); String requestId = testResourceNamer.randomUuid(); azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE).block(); } void insertNoETag(String tableName, Map<String, Object> properties) { String requestId = testResourceNamer.randomUuid(); azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE).log().block(); } @Test void createTable() { String tableName = testResourceNamer.randomName("test", 20); TableProperties tableProperties = new TableProperties().setTableName(tableName); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().createWithResponseAsync(tableProperties, requestId, ResponseFormat.RETURN_CONTENT, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test @Test void deleteTable() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) 
.assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentTable() { String tableName = testResourceNamer.randomName("test", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteWithResponseAsync(tableName, requestId, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryTable() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertNotNull(response.getValue(), "Expected there to be a result."); List<TableResponseProperties> results = response.getValue().getValue(); Assertions.assertNotNull(results, "Expected there to be a set of items."); Assertions.assertEquals(2, results.size()); Assertions.assertEquals(response.getValue().getValue().get(0).getTableName(), tableA); Assertions.assertEquals(response.getValue().getValue().get(1).getTableName(), tableB); }) .expectComplete() .verify(); } @Test void queryTablewithTop() { QueryOptions queryOptions = new QueryOptions() .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_MINIMALMETADATA); String tableA = testResourceNamer.randomName("AtestA", 20); String tableB = testResourceNamer.randomName("BtestB", 20); createTable(tableA); createTable(tableB); int expectedStatusCode = 200; int expectedSize = 1; String requestId = testResourceNamer.randomUuid(); 
queryOptions.setTop(1); StepVerifier.create(azureTable.getTables().queryWithResponseAsync(requestId, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(expectedSize, response.getValue().getValue().size()); Assertions.assertEquals(tableA, response.getValue().getValue().get(0).getTableName()); }) .expectComplete() .verify(); } @Test void insertNoEtag() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 201; String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().insertEntityWithResponseAsync(tableName, TIMEOUT, requestId, ResponseFormat.RETURN_CONTENT, properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> { 
Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void mergeNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().mergeEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void updateEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); properties.put("extraProperty", testResourceNamer.randomName("extraProperty", 16)); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void updateNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = 
testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().updateEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, requestId, "*", properties, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void deleteEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); int expectedStatusCode = 204; String requestId = testResourceNamer.randomUuid(); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", TIMEOUT, requestId, null, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteNonExistentEntity() { String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); String requestId = testResourceNamer.randomUuid(); StepVerifier.create(azureTable.getTables().deleteEntityWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, "*", TIMEOUT, requestId, null, Context.NONE)) .expectError(com.azure.data.tables.implementation.models.TableServiceErrorException.class) .verify(); } @Test void queryEntity() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> 
entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, testResourceNamer.randomName("rowKeyB", 20)); insertNoETag(tableName, entityB); int expectedStatusCode = 200; StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(partitionKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(partitionKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithSelect() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); String rowKeyEntityA = testResourceNamer.randomName("rowKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, rowKeyEntityA); insertNoETag(tableName, entityA); Map<String, Object> entityB = new HashMap<>(); String partitionKeyEntityB = testResourceNamer.randomName("partitionKeyB", 20); String rowKeyEntityB = testResourceNamer.randomName("rowKeyB", 20); entityB.put(PARTITION_KEY, partitionKeyEntityB); entityB.put(ROW_KEY, rowKeyEntityB); insertNoETag(tableName, entityB); int expectedStatusCode = 200; 
queryOptions.setSelect(ROW_KEY); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); Assertions.assertEquals(true, response.getValue().getValue().get(0).containsValue(rowKeyEntityA)); Assertions.assertEquals(true, response.getValue().getValue().get(1).containsValue(rowKeyEntityB)); }) .expectComplete() .verify(); } @Test void queryEntityWithFilter() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> entityA = new HashMap<>(); String partitionKeyEntityA = testResourceNamer.randomName("partitionKeyA", 20); entityA.put(PARTITION_KEY, partitionKeyEntityA); entityA.put(ROW_KEY, testResourceNamer.randomName("rowKeyA", 20)); insertNoETag(tableName, entityA); int expectedStatusCode = 200; queryOptions.setSelect(PARTITION_KEY + "eq" + partitionKeyEntityA); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntityWithTop() { String requestId = testResourceNamer.randomUuid(); QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); int expectedStatusCode = 200; queryOptions.setTop(0); StepVerifier.create(azureTable.getTables().queryEntitiesWithResponseAsync(tableName, TIMEOUT, requestId, null, null, queryOptions, Context.NONE)) .assertNext(response -> { 
Assertions.assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void queryEntitiesWithPartitionAndRowKey() { QueryOptions queryOptions = new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); String tableName = testResourceNamer.randomName("test", 20); createTable(tableName); Map<String, Object> properties = new HashMap<>(); String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); String rowKeyValue = testResourceNamer.randomName("rowKey", 20); properties.put(PARTITION_KEY, partitionKeyValue); properties.put(ROW_KEY, rowKeyValue); insertNoETag(tableName, properties); StepVerifier.create(azureTable.getTables().queryEntitiesWithPartitionAndRowKeyWithResponseAsync(tableName, partitionKeyValue, rowKeyValue, TIMEOUT, testResourceNamer.randomUuid(), queryOptions, Context.NONE)) .assertNext(response -> { Assertions.assertEquals(200, response.getStatusCode()); }) .expectComplete() .verify(); } }
The commented-out session-handling code in this method is dead and should be removed.
/**
 * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from
 * the service.
 *
 * @param lockToken Lock token of the message.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value.
 */
public Mono<Void> complete(String lockToken) {
    // Removed the dead commented-out ServiceBusReceivedMessage branch. Pass the receiver's session id
    // (null for non-session receivers) so session-enabled receivers dispose against the correct session,
    // consistent with the abandon(...)/defer(...) overloads.
    return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null, null,
        receiverOptions.getSessionId(), null);
}
/*if (lockToken instanceof ServiceBusReceivedMessage) {
/**
 * Completes a {@link ServiceBusReceivedMessage message} using its lock token, deleting the message from
 * the service.
 *
 * @param lockToken Lock token of the message.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 */
public Mono<Void> complete(String lockToken) {
    // Non-session receivers report a null session id, so this single path serves both cases.
    final String sessionId = receiverOptions.getSessionId();
    return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null, null, sessionId, null);
}
class ServiceBusReceiverAsyncClient implements AutoCloseable { private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions(); private static final String TRANSACTION_LINK_NAME = "coordinator"; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final MessageLockContainer managementNodeLocks; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class); private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final UnnamedSessionManager unnamedSessionManager; private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1); private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>(); /** * Creates a receiver that listens to a Service Bus resource. * * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource. * @param entityPath The name of the topic or queue. * @param entityType The type of the Service Bus resource. * @param receiverOptions Options when receiving messages. * @param connectionProcessor The AMQP connection to the Service Bus resource. * @param tracerProvider Tracer for telemetry. * @param messageSerializer Serializes and deserializes Service Bus messages. * @param onClientClose Operation to run when the client completes. 
*/ ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.managementNodeLocks = new MessageLockContainer(cleanupInterval); this.unnamedSessionManager = null; } ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, UnnamedSessionManager unnamedSessionManager) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); 
this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unnamedSessionManager = Objects.requireNonNull(unnamedSessionManager, "'sessionManager' cannot be null."); this.managementNodeLocks = new MessageLockContainer(cleanupInterval); } /** * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Service Bus namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Service Bus resource this client interacts with. * * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available * again for processing. Abandoning a message will increase the delivery count on the message. * * @param lockToken Lock token of the message. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> abandon(String lockToken) { return abandon(lockToken, receiverOptions.getSessionId()); } /** * Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available * again for processing. 
Abandoning a message will increase the delivery count on the message. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to abandon. {@code null} if there is no session. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> abandon(String lockToken, String sessionId) { return abandon(lockToken, null, sessionId); } /** * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param lockToken Lock token of the message. * @param propertiesToModify Properties to modify on the message. * * @return A {@link Mono} that completes when the Service Bus operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> abandon(String lockToken, Map<String, Object> propertiesToModify) { return abandon(lockToken, propertiesToModify, receiverOptions.getSessionId()); } /** * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * <p><strong>Complete a message with a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.abandonMessageWithTransaction} * * @param lockToken Lock token of the message. 
* @param propertiesToModify Properties to modify on the message. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus operation finishes. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> abandon(String lockToken, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { return abandon(lockToken, propertiesToModify, receiverOptions.getSessionId(), transactionContext); } /** * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param lockToken Lock token of the message. * @param propertiesToModify Properties to modify on the message. * @param sessionId Session id of the message to abandon. {@code null} if there is no session. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. 
*/ public Mono<Void> abandon(String lockToken, Map<String, Object> propertiesToModify, String sessionId) { return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null, propertiesToModify, sessionId, null); } /** * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param lockToken Lock token of the message. * @param propertiesToModify Properties to modify on the message. * @param sessionId Session id of the message to abandon. {@code null} if there is no session. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> abandon(String lockToken, Map<String, Object> propertiesToModify, String sessionId, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null, propertiesToModify, sessionId, transactionContext); } /** * Completes a {@link ServiceBusReceivedMessage message} using its lock token. 
This will delete the message from the * service. * * @param lockToken Lock token of the message. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ /** * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the * service. * <p><strong>Complete a message with a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.completeMessageWithTransaction} * * @param lockToken Lock token of the message. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> complete(String lockToken, ServiceBusTransactionContext transactionContext) { return complete(lockToken, receiverOptions.getSessionId(), transactionContext); } /** * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the * service. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to complete. {@code null} if there is no session. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code lockToken} is null. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> complete(String lockToken, String sessionId) { return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null, null, sessionId, null); } /** * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the * service. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to complete. {@code null} if there is no session. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> complete(String lockToken, String sessionId, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null, null, sessionId, transactionContext); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred * subqueue. * * @param lockToken Lock token of the message. 
* * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. * @see <a href="https: */ public Mono<Void> defer(String lockToken) { return defer(lockToken, receiverOptions.getSessionId()); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred * subqueue. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to defer. {@code null} if there is no session. * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. * @see <a href="https: */ public Mono<Void> defer(String lockToken, String sessionId) { return defer(lockToken, null, sessionId); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will * move message into the deferred subqueue. * * @param lockToken Lock token of the message. * @param propertiesToModify Message properties to modify. * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. 
* @see <a href="https: */ public Mono<Void> defer(String lockToken, Map<String, Object> propertiesToModify) { return defer(lockToken, propertiesToModify, receiverOptions.getSessionId()); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will * move message into the deferred subqueue. * * @param lockToken Lock token of the message. * @param propertiesToModify Message properties to modify. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. * @see <a href="https: */ public Mono<Void> defer(String lockToken, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { return defer(lockToken, propertiesToModify, receiverOptions.getSessionId(), transactionContext); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will * move message into the deferred subqueue. * * @param lockToken Lock token of the message. * @param propertiesToModify Message properties to modify. * @param sessionId Session id of the message to defer. {@code null} if there is no session. * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
* @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. * @see <a href="https: */ public Mono<Void> defer(String lockToken, Map<String, Object> propertiesToModify, String sessionId) { return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null, propertiesToModify, sessionId, null); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will * move message into the deferred subqueue. * * @param lockToken Lock token of the message. * @param propertiesToModify Message properties to modify. * @param sessionId Session id of the message to defer. {@code null} if there is no session. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. * @see <a href="https: */ public Mono<Void> defer(String lockToken, Map<String, Object> propertiesToModify, String sessionId, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null, propertiesToModify, sessionId, transactionContext); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. 
* * @param lockToken Lock token of the message. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(String lockToken) { return deadLetter(lockToken, receiverOptions.getSessionId()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to deadletter. {@code null} if there is no session. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(String lockToken, String sessionId) { return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to deadletter. {@code null} if there is no session. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException iif {@code lockToken} is {@code null} or an empty value. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(String lockToken, String sessionId, ServiceBusTransactionContext transactionContext) { return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId, transactionContext); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error * description, and/or modified properties. * * @param lockToken Lock token of the message. * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> deadLetter(String lockToken, DeadLetterOptions deadLetterOptions) { return deadLetter(lockToken, deadLetterOptions, receiverOptions.getSessionId()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error * description, and/or modified properties. * * @param lockToken Lock token of the message. * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the dead letter operation finishes. 
* @throws NullPointerException if {@code lockToken}, {@code deadLetterOptions}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> deadLetter(String lockToken, DeadLetterOptions deadLetterOptions, ServiceBusTransactionContext transactionContext) { return deadLetter(lockToken, deadLetterOptions, receiverOptions.getSessionId(), transactionContext); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error * description, and/or modified properties. * * @param lockToken Lock token of the message. * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue. * @param sessionId Session id of the message to deadletter. {@code null} if there is no session. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> deadLetter(String lockToken, DeadLetterOptions deadLetterOptions, String sessionId) { if (Objects.isNull(deadLetterOptions)) { return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null.")); } return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(), deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(), sessionId, null); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error * description, and/or modified properties. * * @param lockToken Lock token of the message. 
* @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue. * @param sessionId Session id of the message to deadletter. {@code null} if there is no session. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. */ public Mono<Void> deadLetter(String lockToken, DeadLetterOptions deadLetterOptions, String sessionId, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(), deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(), sessionId, transactionContext); } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ public Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver.")); } if (unnamedSessionManager != null) { return unnamedSessionManager.getSessionState(sessionId); } else { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peek() { return peek(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peek(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek message from sequence number: {}", sequence); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); logger.verbose("Updating last peeked sequence number: {}", current); sink.next(message); }); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekAt(long sequenceNumber) { return peekAt(sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekAt(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId))); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) { return peekBatch(maxMessages, receiverOptions.getSessionId()); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
* @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> { final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber); final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages); final Mono<ServiceBusReceivedMessage> handle = messages .switchIfEmpty(Mono.fromCallable(() -> { ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]); emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get()); return emptyMessage; })) .last() .handle((last, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, last.getSequenceNumber())); logger.verbose("Last peeked sequence number in batch: {}", current); sink.complete(); }); return Flux.merge(messages, handle); }); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
* @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekBatchAt(int maxMessages, long sequenceNumber) { return peekBatchAt(maxMessages, sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekBatchAt(int maxMessages, long sequenceNumber, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages)); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus * entity. This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. 
*/ public Flux<ServiceBusReceivedMessageContext> receive() { if (unnamedSessionManager != null) { return unnamedSessionManager.receive(); } else { return getOrCreateConsumer().receive().map(ServiceBusReceivedMessageContext::new); } } /** * Receives a bounded stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. This stream * receives either {@code maxNumberOfMessages} are received or the {@code maxWaitTime} has elapsed. * * @param maxNumberOfMessages Maximum number of messages to receive. * @param maxWaitTime Maximum time to wait. * * @return A bounded {@link Flux} of messages. * @throws NullPointerException if {@code maxWaitTime} is null. * @throws IllegalArgumentException if {@code maxNumberOfMessages} is less than 1. {@code maxWaitTime} is zero * or a negative duration. */ public Flux<ServiceBusReceivedMessageContext> receive(int maxNumberOfMessages, Duration maxWaitTime) { if (maxNumberOfMessages < 1) { return fluxError(logger, new IllegalArgumentException("'maxNumberOfMessages' cannot be less than 1.")); } else if (maxWaitTime == null) { return fluxError(logger, new NullPointerException("'maxWaitTime' cannot be null.")); } else if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { return fluxError(logger, new NullPointerException("'maxWaitTime' cannot be negative or zero.")); } return receive().take(maxNumberOfMessages).take(maxWaitTime); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) { return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. 
* * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * @param sessionId Session id of the deferred message. {@code null} if there is no session. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last()) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}. */ public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers) { return receiveDeferredMessageBatch(sequenceNumbers, receiverOptions.getSessionId()); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * @param sessionId Session id of the deferred messages. {@code null} if there is no session. * * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}. 
*/ public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), sequenceNumbers)) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting specified * on the entity. When a message is received in {@link ReceiveMode * server for this receiver instance for a duration as specified during the Queue creation (LockDuration). If * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the * lock is reset to the entity's LockDuration value. * * @param lockToken Lock token of the message to renew. * * @return The new expiration time for the message. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalStateException if the receiver is a session receiver. * @throws IllegalArgumentException if {@code lockToken} is {@code null} or an empty value. 
*/ public Mono<Instant> renewMessageLock(String lockToken) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(lockToken)) { return monoError(logger, new NullPointerException("'lockToken' cannot be null.")); } else if (lockToken.isEmpty()) { return monoError(logger, new IllegalArgumentException("'lockToken' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", lockToken))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(instant -> managementNodeLocks.addOrUpdate(lockToken, instant)); } /** * Sets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<Instant> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = unnamedSessionManager != null ? unnamedSessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)); } /** * Sets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * @param sessionState State to set on the session. 
*
 * @return A Mono that completes when the session state has been set on the service.
 * @throws IllegalStateException if the receiver is a non-session receiver.
 */
public Mono<Void> setSessionState(String sessionId, byte[] sessionState) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver."));
    }

    // If the unnamed session manager owns a receive link for this session, the management
    // operation is associated with that link; otherwise linkName is null and the service
    // resolves the session itself.
    final String linkName = unnamedSessionManager != null
        ? unnamedSessionManager.getLinkName(sessionId)
        : null;

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName));
}

/**
 * Starts a new service side transaction. The {@link ServiceBusTransactionContext} should be passed to all
 * operations that need to be in this transaction.
 *
 * <p><strong>Create a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction}
 *
 * @return The {@link Mono} that finishes this operation on the Service Bus resource.
 */
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
    }

    // Transactions are coordinated over a dedicated AMQP session named "coordinator"
    // (TRANSACTION_LINK_NAME); the service-assigned transaction id is wrapped for callers.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.createTransaction())
        .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()));
}

/**
 * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 *
 * <p><strong>Commit a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction}
 *
 * @param transactionContext to be committed.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
 *
 * @return The {@link Mono} that finishes this operation on the Service Bus resource.
 */
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
            transactionContext.getTransactionId())));
}

/**
 * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 *
 * <p><strong>Rollback a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction}
 *
 * @param transactionContext to be rolled back.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
 *
 * @return The {@link Mono} that finishes this operation on the Service Bus resource.
 */
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
            transactionContext.getTransactionId())));
}

/**
 * Disposes of the consumer by closing the underlying links to the service.
 *
 * <p>Order matters here: the disposed flag is flipped first (getAndSet makes close idempotent),
 * then the active consumer link is closed, then the session manager (if any), and finally the
 * {@code onClientClose} callback notifies the owning builder/client.</p>
 */
@Override
public void close() {
    if (isDisposed.getAndSet(true)) {
        return;
    }

    logger.info("Removing receiver links.");
    final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
    if (disposed != null) {
        disposed.close();
    }

    if (unnamedSessionManager != null) {
        unnamedSessionManager.close();
    }

    onClientClose.run();
}

/**
 * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
 * held by the management node when they are received from the management node or management operations are
 * performed using that {@code lockToken}.
 *
 * @param lockToken Lock token to check for.
 *
 * @return {@code true} if the management node contains the lock token and false otherwise.
*/
private boolean isManagementToken(String lockToken) {
    return managementNodeLocks.contains(lockToken);
}

/**
 * Settles a message (complete/abandon/defer/dead-letter) identified by its lock token.
 *
 * <p>Routing, in order of preference:</p>
 * <ol>
 *   <li>the unnamed session manager, when this is an unnamed-session receiver (falls back to the
 *       management node if the session manager reports it could not settle);</li>
 *   <li>the active receive link consumer, when it exists and the lock is not held by the
 *       management node;</li>
 *   <li>the management node otherwise (also removes the lock from {@code managementNodeLocks}
 *       once the update completes).</li>
 * </ol>
 *
 * @param lockToken Lock token of the message to settle; must be non-null and non-empty.
 * @param dispositionStatus Terminal state to apply (COMPLETED, ABANDONED, DEFERRED, SUSPENDED).
 * @param deadLetterReason Reason when dead-lettering; {@code null} otherwise.
 * @param deadLetterErrorDescription Error description when dead-lettering; {@code null} otherwise.
 * @param propertiesToModify Message properties to modify as part of the settlement, or {@code null}.
 * @param sessionId Session the message belongs to, or {@code null} for non-session messages.
 * @param transactionContext Transaction this settlement participates in, or {@code null}.
 *
 * @return A Mono that completes when the disposition has been applied.
 */
private Mono<Void> updateDisposition(String lockToken, DispositionStatus dispositionStatus,
    String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
    String sessionId, ServiceBusTransactionContext transactionContext) {

    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue())));
    } else if (Objects.isNull(lockToken)) {
        return monoError(logger, new NullPointerException("'lockToken' cannot be null."));
    } else if (lockToken.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'lockToken' cannot be empty."));
    }

    // Settlement requires a lock, which only PEEK_LOCK provides.
    if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) {
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
    }

    // Fall back to the receiver's configured session id when the caller did not supply one.
    // The commented-out branch below is retained from a previous message-based overload.
    final String sessionIdToUse;
    /*if (message instanceof ServiceBusReceivedMessage) {
        sessionIdToUse = ((ServiceBusReceivedMessage) message).getSessionId();
        if (!CoreUtils.isNullOrEmpty(sessionIdToUse) && !CoreUtils.isNullOrEmpty(sessionId)
            && !sessionIdToUse.equals(sessionId)) {
            logger.warning("Given sessionId '{}' does not match message's sessionId '{}'", sessionId,
                sessionIdToUse);
        }
    } else */if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
        sessionIdToUse = receiverOptions.getSessionId();
    } else {
        sessionIdToUse = sessionId;
    }

    logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId {}.", entityPath, dispositionStatus,
        lockToken, sessionIdToUse);

    // Deferred fallback path: settle through the management node, then forget the lock.
    final Mono<Void> performOnManagement = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId),
            transactionContext))
        .then(Mono.fromRunnable(() -> {
            logger.info("{}: Management node Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken);

            managementNodeLocks.remove(lockToken);
        }));

    if (unnamedSessionManager != null) {
        return unnamedSessionManager.updateDisposition(lockToken, sessionId, dispositionStatus,
            propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext)
            .flatMap(isSuccess -> {
                if (isSuccess) {
                    return Mono.empty();
                }

                // NOTE: "manger" typo is in the original runtime log string; left untouched.
                logger.info("Could not perform on session manger. Performing on management node.");
                return performOnManagement;
            });
    }

    final ServiceBusAsyncConsumer existingConsumer = consumer.get();
    if (isManagementToken(lockToken) || existingConsumer == null) {
        return performOnManagement;
    } else {
        return existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, transactionContext)
            .then(Mono.fromRunnable(() -> logger.info("{}: Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken)));
    }
}

/**
 * Returns the active {@link ServiceBusAsyncConsumer}, creating and registering a new one if none
 * exists. Creation races are resolved with a compare-and-set on {@code consumer}; the losing
 * instance is closed and the winner returned.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }

    final String linkName = StringUtil.getRandomString(entityPath);
    logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);

    // .repeat() re-requests a link after the previous one terminates, so the processor below can
    // transparently obtain a fresh link on recoverable failures.
    final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType);
        }
    })
        .doOnNext(next -> {
            final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
                + " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
            logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(),
                CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType);
        })
        .repeat();

    final LinkErrorContext context = new LinkErrorContext(fullyQualifiedNamespace, entityPath, linkName,
        null);
    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, connectionProcessor,
            context));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, false, receiverOptions.autoLockRenewalEnabled(),
        receiverOptions.getMaxAutoLockRenewalDuration(), connectionProcessor.getRetryOptions(),
        (token, associatedLinkName) -> renewMessageLock(token, associatedLinkName));

    // CAS guards against a concurrent creator; the loser is closed to avoid leaking a link.
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        newConsumer.close();
        return consumer.get();
    }
}

/**
 * @return receiver options set by the user when this client was built.
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}

/**
 * Renews the message lock via the management node.
 *
 * @param lockToken Lock token of the message whose lock to renew.
 * @param linkName Name of the receive link the lock is associated with, or {@code null}.
 *
 * @return A Mono emitting the new lock expiration instant.
 */
private Mono<Instant> renewMessageLock(String lockToken, String linkName) {
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, linkName));
}

/**
 * Resolves the receive link name to associate with a management operation.
 *
 * @param sessionId Session the operation targets, or {@code null}.
 *
 * @return The session manager's link for {@code sessionId} when applicable; {@code null} when a
 * session id is supplied to a non-session receiver; otherwise the current consumer's link name,
 * or {@code null} if no consumer link has been created yet.
 */
private String getLinkName(String sessionId) {
    if (unnamedSessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) {
        return unnamedSessionManager.getLinkName(sessionId);
    } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) {
        return null;
    } else {
        final ServiceBusAsyncConsumer existing = consumer.get();
        return existing != null ? existing.getLinkName() : null;
    }
}
}
/**
 * An asynchronous receiver for a Service Bus queue or topic subscription. Supports message
 * settlement (complete, abandon, defer, dead-letter), session state, and service-side
 * transactions. Instances are created by the client builder and closed via {@link #close()}.
 */
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    // Name of the AMQP session used for transaction coordination.
    private static final String TRANSACTION_LINK_NAME = "coordinator";

    // Set once in close(); all public operations short-circuit with an error afterwards.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Lock tokens settled/renewed through the management node (see isManagementToken).
    private final MessageLockContainer managementNodeLocks;
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    // NOTE(review): stored but not referenced in the code visible here — presumably used for
    // tracing elsewhere in this class; confirm before removing.
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    // Callback notifying the owning builder/client when this receiver closes.
    private final Runnable onClientClose;
    // Non-null only for unnamed-session receivers (second constructor).
    private final UnnamedSessionManager unnamedSessionManager;
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    // Lazily-created consumer over the active receive link; guarded by CAS.
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param cleanupInterval Interval used to expire entries in the management-node lock container.
     * @param tracerProvider Tracer for telemetry.
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        // NOTE(review): message text quoting ("'receiveOptions cannot be null.'") is odd but is a
        // runtime string; left as-is.
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");

        this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
        // Non-session receiver: no unnamed session manager.
        this.unnamedSessionManager = null;
    }

    /**
     * Creates an unnamed-session receiver; identical to the primary constructor except that a
     * required {@link UnnamedSessionManager} handles session link management and settlement.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
        UnnamedSessionManager unnamedSessionManager) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.unnamedSessionManager = Objects.requireNonNull(unnamedSessionManager,
            "'sessionManager' cannot be null.");

        this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
    }

    /**
     * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar
     * to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Service Bus namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Service Bus resource this client interacts with.
     *
     * @return The Service Bus resource this client interacts with.
     */
    public String getEntityPath() {
        return entityPath;
    }

    /**
     * Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
     * again for processing. Abandoning a message will increase the delivery count on the message.
     *
     * @param lockToken Lock token of the message.
     *
     * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
     * @throws NullPointerException if {@code lockToken} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     * {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
     * @throws IllegalArgumentException if {@code lockToken} is an empty value.
     */
    public Mono<Void> abandon(String lockToken) {
        // Uses the receiver's configured session id (null for non-session receivers).
        return abandon(lockToken, receiverOptions.getSessionId());
    }

    /**
     * Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
     * again for processing.
Abandoning a message will increase the delivery count on the message. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to abandon. {@code null} if there is no session. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> abandon(String lockToken, String sessionId) { return abandon(lockToken, null, sessionId); } /** * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param lockToken Lock token of the message. * @param propertiesToModify Properties to modify on the message. * * @return A {@link Mono} that completes when the Service Bus operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> abandon(String lockToken, Map<String, Object> propertiesToModify) { return abandon(lockToken, propertiesToModify, receiverOptions.getSessionId()); } /** * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * <p><strong>Complete a message with a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.abandonMessageWithTransaction} * * @param lockToken Lock token of the message. 
* @param propertiesToModify Properties to modify on the message. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus operation finishes. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> abandon(String lockToken, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { return abandon(lockToken, propertiesToModify, receiverOptions.getSessionId(), transactionContext); } /** * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param lockToken Lock token of the message. * @param propertiesToModify Properties to modify on the message. * @param sessionId Session id of the message to abandon. {@code null} if there is no session. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. 
*/ public Mono<Void> abandon(String lockToken, Map<String, Object> propertiesToModify, String sessionId) { return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null, propertiesToModify, sessionId, null); } /** * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param lockToken Lock token of the message. * @param propertiesToModify Properties to modify on the message. * @param sessionId Session id of the message to abandon. {@code null} if there is no session. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> abandon(String lockToken, Map<String, Object> propertiesToModify, String sessionId, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null, propertiesToModify, sessionId, transactionContext); } /** * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the * service. 
* * @param lockToken Lock token of the message. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ /** * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the * service. * <p><strong>Complete a message with a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.completeMessageWithTransaction} * * @param lockToken Lock token of the message. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> complete(String lockToken, ServiceBusTransactionContext transactionContext) { return complete(lockToken, receiverOptions.getSessionId(), transactionContext); } /** * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the * service. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to complete. {@code null} if there is no session. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code lockToken} is null. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> complete(String lockToken, String sessionId) { return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null, null, sessionId, null); } /** * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the * service. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to complete. {@code null} if there is no session. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> complete(String lockToken, String sessionId, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null, null, sessionId, transactionContext); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred * subqueue. * * @param lockToken Lock token of the message. 
* * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. * @see <a href="https: */ public Mono<Void> defer(String lockToken) { return defer(lockToken, receiverOptions.getSessionId()); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred * subqueue. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to defer. {@code null} if there is no session. * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. * @see <a href="https: */ public Mono<Void> defer(String lockToken, String sessionId) { return defer(lockToken, null, sessionId); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will * move message into the deferred subqueue. * * @param lockToken Lock token of the message. * @param propertiesToModify Message properties to modify. * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. 
* @see <a href="https: */ public Mono<Void> defer(String lockToken, Map<String, Object> propertiesToModify) { return defer(lockToken, propertiesToModify, receiverOptions.getSessionId()); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will * move message into the deferred subqueue. * * @param lockToken Lock token of the message. * @param propertiesToModify Message properties to modify. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. * @see <a href="https: */ public Mono<Void> defer(String lockToken, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { return defer(lockToken, propertiesToModify, receiverOptions.getSessionId(), transactionContext); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will * move message into the deferred subqueue. * * @param lockToken Lock token of the message. * @param propertiesToModify Message properties to modify. * @param sessionId Session id of the message to defer. {@code null} if there is no session. * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. 
* @see <a href="https: */ public Mono<Void> defer(String lockToken, Map<String, Object> propertiesToModify, String sessionId) { return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null, propertiesToModify, sessionId, null); } /** * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will * move message into the deferred subqueue. * * @param lockToken Lock token of the message. * @param propertiesToModify Message properties to modify. * @param sessionId Session id of the message to defer. {@code null} if there is no session. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. * @see <a href="https: */ public Mono<Void> defer(String lockToken, Map<String, Object> propertiesToModify, String sessionId, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null, propertiesToModify, sessionId, transactionContext); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param lockToken Lock token of the message. 
* * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(String lockToken) { return deadLetter(lockToken, receiverOptions.getSessionId()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to deadletter. {@code null} if there is no session. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(String lockToken, String sessionId) { return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param lockToken Lock token of the message. * @param sessionId Session id of the message to deadletter. {@code null} if there is no session. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
* @throws IllegalArgumentException if {@code lockToken} is an empty value. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(String lockToken, String sessionId, ServiceBusTransactionContext transactionContext) { return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId, transactionContext); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error * description, and/or modified properties. * * @param lockToken Lock token of the message. * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> deadLetter(String lockToken, DeadLetterOptions deadLetterOptions) { return deadLetter(lockToken, deadLetterOptions, receiverOptions.getSessionId()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error * description, and/or modified properties. * * @param lockToken Lock token of the message. * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue. * @param transactionContext in which this operation is taking part in. The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken}, {@code deadLetterOptions}, {@code transactionContext} or * {@code transactionContext.transactionId} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
* @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> deadLetter(String lockToken, DeadLetterOptions deadLetterOptions, ServiceBusTransactionContext transactionContext) { return deadLetter(lockToken, deadLetterOptions, receiverOptions.getSessionId(), transactionContext); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error * description, and/or modified properties. * * @param lockToken Lock token of the message. * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue. * @param sessionId Session id of the message to deadletter. {@code null} if there is no session. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> deadLetter(String lockToken, DeadLetterOptions deadLetterOptions, String sessionId) { if (Objects.isNull(deadLetterOptions)) { return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null.")); } return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(), deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(), sessionId, null); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error * description, and/or modified properties. * * @param lockToken Lock token of the message. * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue. * @param sessionId Session id of the message to deadletter. {@code null} if there is no session. * @param transactionContext in which this operation is taking part in. 
The transaction should be created first by * {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalArgumentException if {@code lockToken} is an empty value. */ public Mono<Void> deadLetter(String lockToken, DeadLetterOptions deadLetterOptions, String sessionId, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(), deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(), sessionId, transactionContext); } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ public Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver.")); } if (unnamedSessionManager != null) { return unnamedSessionManager.getSessionState(sessionId); } else { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peek() { return peek(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peek(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek message from sequence number: {}", sequence); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); logger.verbose("Updating last peeked sequence number: {}", current); sink.next(message); }); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekAt(long sequenceNumber) { return peekAt(sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekAt(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId))); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) { return peekBatch(maxMessages, receiverOptions.getSessionId()); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
* @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> { final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber); final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages); final Mono<ServiceBusReceivedMessage> handle = messages .switchIfEmpty(Mono.fromCallable(() -> { ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]); emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get()); return emptyMessage; })) .last() .handle((last, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, last.getSequenceNumber())); logger.verbose("Last peeked sequence number in batch: {}", current); sink.complete(); }); return Flux.merge(messages, handle); }); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
* @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekBatchAt(int maxMessages, long sequenceNumber) { return peekBatchAt(maxMessages, sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekBatchAt(int maxMessages, long sequenceNumber, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages)); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus * entity. This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. 
*/ public Flux<ServiceBusReceivedMessageContext> receive() { if (unnamedSessionManager != null) { return unnamedSessionManager.receive(); } else { return getOrCreateConsumer().receive().map(ServiceBusReceivedMessageContext::new); } } /** * Receives a bounded stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. This stream * receives either {@code maxNumberOfMessages} are received or the {@code maxWaitTime} has elapsed. * * @param maxNumberOfMessages Maximum number of messages to receive. * @param maxWaitTime Maximum time to wait. * * @return A bounded {@link Flux} of messages. * @throws NullPointerException if {@code maxWaitTime} is null. * @throws IllegalArgumentException if {@code maxNumberOfMessages} is less than 1. {@code maxWaitTime} is zero * or a negative duration. */ public Flux<ServiceBusReceivedMessageContext> receive(int maxNumberOfMessages, Duration maxWaitTime) { if (maxNumberOfMessages < 1) { return fluxError(logger, new IllegalArgumentException("'maxNumberOfMessages' cannot be less than 1.")); } else if (maxWaitTime == null) { return fluxError(logger, new NullPointerException("'maxWaitTime' cannot be null.")); } else if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { return fluxError(logger, new NullPointerException("'maxWaitTime' cannot be negative or zero.")); } return receive().take(maxNumberOfMessages).take(maxWaitTime); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) { return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. 
* * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * @param sessionId Session id of the deferred message. {@code null} if there is no session. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last()) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}. */ public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers) { return receiveDeferredMessageBatch(sequenceNumbers, receiverOptions.getSessionId()); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * @param sessionId Session id of the deferred messages. {@code null} if there is no session. * * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}. 
*/ public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), sequenceNumbers)) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting specified * on the entity. When a message is received in {@link ReceiveMode * server for this receiver instance for a duration as specified during the Queue creation (LockDuration). If * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the * lock is reset to the entity's LockDuration value. * * @param lockToken Lock token of the message to renew. * * @return The new expiration time for the message. * @throws NullPointerException if {@code lockToken} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalStateException if the receiver is a session receiver. * @throws IllegalArgumentException if {@code lockToken} is an empty value. 
*/ public Mono<Instant> renewMessageLock(String lockToken) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(lockToken)) { return monoError(logger, new NullPointerException("'lockToken' cannot be null.")); } else if (lockToken.isEmpty()) { return monoError(logger, new IllegalArgumentException("'lockToken' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", lockToken))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(instant -> managementNodeLocks.addOrUpdate(lockToken, instant)); } /** * Sets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<Instant> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = unnamedSessionManager != null ? unnamedSessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)); } /** * Sets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * @param sessionState State to set on the session. 
* * @return A Mono that completes when the session is set * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = unnamedSessionManager != null ? unnamedSessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName)); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext} should be passed to all * operations that needs to be in this transaction. * * <p><strong>Create a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction} * * @return The {@link Mono} that finishes this operation on service bus resource. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Commit a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction} * * @param transactionContext to be committed. 
* @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * * @return The {@link Mono} that finishes this operation on service bus resource. */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Rollback a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction} * * @param transactionContext to be rollbacked. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * * @return The {@link Mono} that finishes this operation on service bus resource. 
*/ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the consumer by closing the underlying connection to the service. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } logger.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (unnamedSessionManager != null) { unnamedSessionManager.close(); } onClientClose.run(); } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.contains(lockToken); } private Mono<Void> updateDisposition(String lockToken, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, String sessionId, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(lockToken)) { return monoError(logger, new NullPointerException("'lockToken' cannot be null.")); } else if (lockToken.isEmpty()) { return monoError(logger, new IllegalArgumentException("'lockToken' cannot be empty.")); } if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) { return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId {}.", entityPath, dispositionStatus, lockToken, sessionIdToUse); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { logger.info("{}: Management node Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); managementNodeLocks.remove(lockToken); })); if (unnamedSessionManager != null) { return unnamedSessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { return Mono.empty(); } logger.info("Could not perform on session manger. Performing on management node."); return performOnManagement; }); } final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { return performOnManagement; } else { return existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> logger.info("{}: Update completed. Disposition: {}. Lock: {}.", entityPath, dispositionStatus, lockToken))); } } private ServiceBusAsyncConsumer getOrCreateConsumer() { final ServiceBusAsyncConsumer existing = consumer.get(); if (existing != null) { return existing; } final String linkName = StringUtil.getRandomString(entityPath); logger.info("{}: Creating consumer for link '{}'", entityPath, linkName); final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> { if (receiverOptions.isSessionReceiver()) { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, receiverOptions.getSessionId()); } else { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType); } }) .doOnNext(next -> { final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]" + " sessionEnabled? 
{} transferEntityPath: [{}], entityType: [{}]"; logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(), CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType); }) .repeat(); final LinkErrorContext context = new LinkErrorContext(fullyQualifiedNamespace, entityPath, linkName, null); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions()); final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith( new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, connectionProcessor, context)); final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, false, receiverOptions.autoLockRenewalEnabled(), receiverOptions.getMaxAutoLockRenewalDuration(), connectionProcessor.getRetryOptions(), (token, associatedLinkName) -> renewMessageLock(token, associatedLinkName)); if (consumer.compareAndSet(null, newConsumer)) { return newConsumer; } else { newConsumer.close(); return consumer.get(); } } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Renews the message lock, and updates its value in the container. */ private Mono<Instant> renewMessageLock(String lockToken, String linkName) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, linkName)); } /** * If the receiver has not connected via {@link * the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. 
*/ private String getLinkName(String sessionId) { if (unnamedSessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return unnamedSessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? existing.getLinkName() : null; } } }
Any reason `Class.forName(String)` needs to be used instead of `ByteBufferReceiveTest.class`?
public static void main(String[] args) { Class<?>[] testClasses; try { testClasses = new Class<?>[]{ Class.forName("com.azure.core.perf.ByteBufferReceiveTest"), Class.forName("com.azure.core.perf.JsonReceiveTest"), Class.forName("com.azure.core.perf.JsonSendTest"), Class.forName("com.azure.core.perf.XmlReceiveTest"), Class.forName("com.azure.core.perf.XmlSendTest"), Class.forName("com.azure.core.perf.ByteBufferSendTest") }; } catch (ClassNotFoundException e) { throw new RuntimeException(e); } PerfStressProgram.run(testClasses, args); }
Class.forName("com.azure.core.perf.ByteBufferReceiveTest"),
public static void main(String[] args) { PerfStressProgram.run(new Class<?>[]{ ByteBufferReceiveTest.class, JsonReceiveTest.class, JsonSendTest.class, XmlReceiveTest.class, XmlSendTest.class, ByteBufferSendTest.class }, args); }
class App { }
class App { }
Is the same seed used to ensure runs are consistent across each other?
public ByteBufferReceiveTest(PerfStressOptions options) throws IOException, URISyntaxException { super(options); bodyBytes = new byte[(int) options.getSize()]; new Random(0).nextBytes(bodyBytes); mockHTTPClient = new MockHttpClient(httpRequest -> createMockResponse(httpRequest, "application/octet-stream", bodyBytes)); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(mockHTTPClient) .build(); service = RestProxy.create(MyRestProxyService.class, pipeline); }
new Random(0).nextBytes(bodyBytes);
public ByteBufferReceiveTest(PerfStressOptions options) throws IOException, URISyntaxException { super(options); bodyBytes = new byte[(int) options.getSize()]; new Random(0).nextBytes(bodyBytes); mockHTTPClient = new MockHttpClient(httpRequest -> createMockResponse(httpRequest, "application/octet-stream", bodyBytes)); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(mockHTTPClient) .build(); service = RestProxy.create(MyRestProxyService.class, pipeline); }
class ByteBufferReceiveTest extends RestProxyTestBase<PerfStressOptions> { private final MockHttpClient mockHTTPClient; private final MyRestProxyService service; private final byte[] bodyBytes; @Override public void run() { throw new UnsupportedOperationException(); } @Override public Mono<Void> runAsync() { return service.getRawDataAsync() .flatMapMany(response -> response.getValue()) .map(byteBuffer -> { for (int i = 0; i < byteBuffer.remaining(); i++) { byteBuffer.get(); } return 1; }).then(); } }
class ByteBufferReceiveTest extends RestProxyTestBase<PerfStressOptions> { private final MockHttpClient mockHTTPClient; private final MyRestProxyService service; private final byte[] bodyBytes; @Override public void run() { throw new UnsupportedOperationException(); } @Override public Mono<Void> runAsync() { return service.getRawDataAsync() .flatMapMany(response -> response.getValue()) .map(byteBuffer -> { for (int i = 0; i < byteBuffer.remaining(); i++) { byteBuffer.get(); } return 1; }).then(); } }
This is to simulate consumption of the response stream, correct?
/**
 * Runs one async iteration: fetches the raw data and reads the response stream to the end.
 *
 * @return a Mono that completes once every byte of every emitted buffer has been read.
 */
public Mono<Void> runAsync() {
    return service.getRawDataAsync()
        .flatMapMany(response -> response.getValue())
        .map(byteBuffer -> {
            // Read every remaining byte so the response payload is fully consumed;
            // the mapped value (1) is only a placeholder and is discarded by then().
            for (int i = 0; i < byteBuffer.remaining(); i++) {
                byteBuffer.get();
            }
            return 1;
        }).then();
}
}
public Mono<Void> runAsync() { return service.getRawDataAsync() .flatMapMany(response -> response.getValue()) .map(byteBuffer -> { for (int i = 0; i < byteBuffer.remaining(); i++) { byteBuffer.get(); } return 1; }).then(); }
class ByteBufferReceiveTest extends RestProxyTestBase<PerfStressOptions> { private final MockHttpClient mockHTTPClient; private final MyRestProxyService service; private final byte[] bodyBytes; public ByteBufferReceiveTest(PerfStressOptions options) throws IOException, URISyntaxException { super(options); bodyBytes = new byte[(int) options.getSize()]; new Random(0).nextBytes(bodyBytes); mockHTTPClient = new MockHttpClient(httpRequest -> createMockResponse(httpRequest, "application/octet-stream", bodyBytes)); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(mockHTTPClient) .build(); service = RestProxy.create(MyRestProxyService.class, pipeline); } @Override public void run() { throw new UnsupportedOperationException(); } @Override }
class ByteBufferReceiveTest extends RestProxyTestBase<PerfStressOptions> { private final MockHttpClient mockHTTPClient; private final MyRestProxyService service; private final byte[] bodyBytes; public ByteBufferReceiveTest(PerfStressOptions options) throws IOException, URISyntaxException { super(options); bodyBytes = new byte[(int) options.getSize()]; new Random(0).nextBytes(bodyBytes); mockHTTPClient = new MockHttpClient(httpRequest -> createMockResponse(httpRequest, "application/octet-stream", bodyBytes)); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(mockHTTPClient) .build(); service = RestProxy.create(MyRestProxyService.class, pipeline); } @Override public void run() { throw new UnsupportedOperationException(); } @Override }
Yes, this would be interesting. For example, having a block size equivalent to the TCP packet size, etc.
/**
 * Creates the send test with a payload flux produced by {@code TestDataCreationHelper}
 * and a mock HTTP client that receives the uploaded data.
 *
 * @param options perf-stress options; {@code getSize()} controls the total payload size in bytes.
 */
public ByteBufferSendTest(PerfStressOptions options) {
    super(options);
    dataToSend = TestDataCreationHelper.createRandomByteBufferFlux(options.getSize());
    mockHttpReceiveClient = new MockHttpReceiveClient();
    final HttpPipeline pipeline = new HttpPipelineBuilder()
        .httpClient(mockHttpReceiveClient)
        .build();
    service = RestProxy.create(MyRestProxyService.class, pipeline);
}
public ByteBufferSendTest(PerfStressOptions options) { super(options); dataToSend = TestDataCreationHelper.createRandomByteBufferFlux(options.getSize()); mockHttpReceiveClient = new MockHttpReceiveClient(); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(mockHttpReceiveClient) .build(); service = RestProxy.create(MyRestProxyService.class, pipeline); }
class ByteBufferSendTest extends RestProxyTestBase<PerfStressOptions> { private final MockHttpReceiveClient mockHttpReceiveClient; private final MyRestProxyService service; private final Flux<ByteBuffer> dataToSend; @Override public void run() { throw new UnsupportedOperationException(); } @Override public Mono<Void> runAsync() { return dataToSend .map(byteBuffer -> { service.setRawData(Flux.just(byteBuffer), byteBuffer.remaining()); return 1; }).then(); } }
class ByteBufferSendTest extends RestProxyTestBase<PerfStressOptions> { private final MockHttpReceiveClient mockHttpReceiveClient; private final MyRestProxyService service; private final Flux<ByteBuffer> dataToSend; @Override public void run() { throw new UnsupportedOperationException(); } @Override public Mono<Void> runAsync() { return dataToSend .map(byteBuffer -> { service.setRawData(Flux.just(byteBuffer), byteBuffer.remaining()); return 1; }).then(); } }
With this you will be losing the cause exception's stack trace. You probably want to keep the root cause exception's stack trace.
/**
 * Compares two ORDER BY items: first by their item-type precedence, then — for items of
 * the same type — by value.
 *
 * @param obj1 the first item.
 * @param obj2 the second item.
 * @return a negative value, zero, or a positive value per the {@code Comparator} contract.
 * @throws IllegalStateException if hashing an array/object item fails.
 * @throws ClassCastException if the item type is not one of the handled types.
 */
public int compare(Object obj1, Object obj2) {
    ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1);
    ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2);

    // Items of different types are ordered by their fixed type precedence value.
    int cmp = Integer.compare(type1.getVal(), type2.getVal());
    if (cmp != 0) {
        return cmp;
    }

    switch (type1) {
        case NoValue:
        case Null:
            // All NoValue/Null items compare equal to each other.
            return 0;
        case Boolean:
            return Boolean.compare((Boolean) obj1, (Boolean) obj2);
        case Number:
            return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue());
        case String:
            return ((String) obj1).compareTo((String) obj2);
        case ArrayNode:
        case ObjectNode:
            try {
                // Complex items are compared by their 128-bit distinct hash.
                UInt128 hash1 = DistinctHash.getHash(obj1);
                UInt128 hash2 = DistinctHash.getHash(obj2);
                return hash1.compareTo(hash2);
            } catch (IOException e) {
                // Pass the IOException as the cause so its stack trace is preserved
                // instead of flattening it into the message string.
                throw new IllegalStateException(
                    String.format("Getting hash exception for type %s", type1.toString()), e);
            }
        default:
            throw new ClassCastException(String.format("Unexpected type: %s", type1.toString()));
    }
}
throw new IllegalStateException(String.format("Exception for getting hash for type %s: %s ", type1.toString(), e.getMessage()));
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
/**
 * Lazy-initialization holder for the {@code ItemComparator} singleton: the JVM initializes
 * this class — and therefore {@code INSTANCE} — only on first use, and class initialization
 * is guaranteed by the language to be thread-safe.
 */
class SingletonHelper {
    // The single shared comparator instance.
    private static final ItemComparator INSTANCE = new ItemComparator();
}
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
You don't need to mock this anymore; you can set the string and return it directly.
/**
 * Initializes the mocks and builds the synchronous client under test.
 */
void setup() {
    MockitoAnnotations.initMocks(this);

    when(asyncClient.getEntityPath()).thenReturn(ENTITY_PATH);
    when(asyncClient.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
    when(asyncClient.getReceiverOptions()).thenReturn(
        new ReceiverOptions(ReceiveMode.PEEK_LOCK, 1, maxAutoLockRenewalDuration));

    // Do not stub the lock token: it is a plain String value, not a mock interaction.
    // when(messageLockToken) records no method invocation on a mock, so Mockito fails at
    // runtime with MissingMethodInvocationException. Tests should use the LOCK_TOKEN
    // constant directly instead.
    client = new ServiceBusReceiverClient(asyncClient, OPERATION_TIMEOUT);
}
when(messageLockToken).thenReturn(LOCK_TOKEN);
void setup() { MockitoAnnotations.initMocks(this); when(asyncClient.getEntityPath()).thenReturn(ENTITY_PATH); when(asyncClient.getFullyQualifiedNamespace()).thenReturn(NAMESPACE); when(asyncClient.getReceiverOptions()).thenReturn(new ReceiverOptions(ReceiveMode.PEEK_LOCK, 1, maxAutoLockRenewalDuration)); client = new ServiceBusReceiverClient(asyncClient, OPERATION_TIMEOUT); }
class ServiceBusReceiverClientTest { private static final String NAMESPACE = "test-namespace"; private static final String ENTITY_PATH = "test-entity-path"; private static final String LOCK_TOKEN = UUID.randomUUID().toString(); private static final Duration OPERATION_TIMEOUT = Duration.ofSeconds(5); private final ClientLogger logger = new ClientLogger(ServiceBusReceiverClientTest.class); private Duration maxAutoLockRenewalDuration; private ServiceBusReceiverClient client; @Mock private ServiceBusReceiverAsyncClient asyncClient; @Mock private String messageLockToken; @Mock private Map<String, Object> propertiesToModify; @Mock ServiceBusTransactionContext transactionContext; @BeforeEach @AfterEach void teardown() { Mockito.framework().clearInlineMocks(); } @Test void nullConstructor() { assertThrows(NullPointerException.class, () -> new ServiceBusReceiverClient(null, OPERATION_TIMEOUT)); assertThrows(NullPointerException.class, () -> new ServiceBusReceiverClient(asyncClient, null)); } @Test void properties() { assertEquals(NAMESPACE, client.getFullyQualifiedNamespace()); assertEquals(ENTITY_PATH, client.getEntityPath()); } @Test void abandonMessageWithTransaction() { when(asyncClient.abandon(eq(messageLockToken), isNull(), any(ServiceBusTransactionContext.class))).thenReturn(Mono.empty()); client.abandon(messageLockToken, null, transactionContext); verify(asyncClient).abandon(argThat(ServiceBusReceiverClientTest::lockTokenEquals), isNull(), eq(transactionContext)); } @Test void abandonMessage() { when(asyncClient.abandon(eq(messageLockToken))).thenReturn(Mono.empty()); client.abandon(messageLockToken); verify(asyncClient).abandon(argThat(ServiceBusReceiverClientTest::lockTokenEquals)); } @Test void abandonMessageWithProperties() { when(asyncClient.abandon(eq(messageLockToken), anyMap())).thenReturn(Mono.empty()); when(asyncClient.abandon(eq(messageLockToken), any(), anyString())).thenReturn(Mono.empty()); client.abandon(messageLockToken, propertiesToModify); 
verify(asyncClient).abandon(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(propertiesToModify)); } @Test void completeMessageWithTransaction() { when(asyncClient.complete(eq(messageLockToken), any(ServiceBusTransactionContext.class))).thenReturn(Mono.empty()); client.complete(messageLockToken, transactionContext); verify(asyncClient).complete(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(transactionContext)); } @Test void completeMessage() { when(asyncClient.complete(eq(messageLockToken))).thenReturn(Mono.empty()); client.complete(messageLockToken); verify(asyncClient).complete(argThat(ServiceBusReceiverClientTest::lockTokenEquals)); } @Test void deferMessage() { when(asyncClient.defer(eq(messageLockToken))).thenReturn(Mono.empty()); client.defer(messageLockToken); verify(asyncClient).defer(argThat(ServiceBusReceiverClientTest::lockTokenEquals)); } @Test void deferMessageWithPropertiesWithTransaction() { when(asyncClient.defer(eq(messageLockToken), anyMap(), any(ServiceBusTransactionContext.class))).thenReturn(Mono.empty()); when(asyncClient.defer(eq(messageLockToken), any(), anyString(), any(ServiceBusTransactionContext.class))).thenReturn(Mono.empty()); client.defer(messageLockToken, propertiesToModify, transactionContext); verify(asyncClient).defer(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(propertiesToModify), eq(transactionContext)); } @Test void deferMessageWithProperties() { when(asyncClient.defer(eq(messageLockToken), anyMap())).thenReturn(Mono.empty()); when(asyncClient.defer(eq(messageLockToken), any(), anyString())).thenReturn(Mono.empty()); client.defer(messageLockToken, propertiesToModify); verify(asyncClient).defer(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(propertiesToModify)); } @Test void deadLetterMessage() { when(asyncClient.deadLetter(eq(messageLockToken))).thenReturn(Mono.empty()); client.deadLetter(messageLockToken); 
verify(asyncClient).deadLetter(argThat(ServiceBusReceiverClientTest::lockTokenEquals)); } @Test void deadLetterMessageWithOptionsWithTransaction() { final DeadLetterOptions options = new DeadLetterOptions() .setDeadLetterErrorDescription("foo") .setDeadLetterReason("bar") .setPropertiesToModify(propertiesToModify); when(asyncClient.deadLetter(eq(messageLockToken), any(DeadLetterOptions.class), any(ServiceBusTransactionContext.class))) .thenReturn(Mono.empty()); client.deadLetter(messageLockToken, options, transactionContext); verify(asyncClient).deadLetter(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(options), eq(transactionContext)); } @Test void deadLetterMessageWithOptions() { final DeadLetterOptions options = new DeadLetterOptions() .setDeadLetterErrorDescription("foo") .setDeadLetterReason("bar") .setPropertiesToModify(propertiesToModify); when(asyncClient.deadLetter(eq(messageLockToken), any(DeadLetterOptions.class))) .thenReturn(Mono.empty()); client.deadLetter(messageLockToken, options); verify(asyncClient).deadLetter(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(options)); } @Test void getSessionState() { final String sessionId = "a-session-id"; final byte[] contents = new byte[]{10, 111, 23}; when(asyncClient.getSessionState(sessionId)).thenReturn(Mono.just(contents)); final byte[] actual = client.getSessionState(sessionId); assertEquals(contents, actual); } @Test void getSessionStateNull() { final String sessionId = "a-session-id"; when(asyncClient.getSessionState(sessionId)).thenReturn(Mono.empty()); final byte[] actual = client.getSessionState(sessionId); assertNull(actual); } @Test void peekMessage() { final ServiceBusReceivedMessage message = mock(ServiceBusReceivedMessage.class); when(asyncClient.peek()).thenReturn(Mono.just(message)); final ServiceBusReceivedMessage actual = client.peek(); assertEquals(message, actual); } @Test void peekMessageFromSequence() { final long sequenceNumber = 154; final 
ServiceBusReceivedMessage message = mock(ServiceBusReceivedMessage.class); when(asyncClient.peekAt(sequenceNumber)).thenReturn(Mono.just(message)); final ServiceBusReceivedMessage actual = client.peekAt(sequenceNumber); assertEquals(message, actual); } /** * Verifies that all requested messages are returned when we can satisfy them all. */ @Test void peekBatchMessagesMax() { final int maxMessages = 10; Flux<ServiceBusReceivedMessage> messages = Flux.create(sink -> { final AtomicInteger emittedMessages = new AtomicInteger(); sink.onRequest(number -> { logger.info("Requesting {} messages.", number); if (emittedMessages.get() >= maxMessages) { logger.info("Completing sink."); sink.complete(); return; } for (int i = 0; i < number; i++) { sink.next(mock(ServiceBusReceivedMessage.class)); final int emit = emittedMessages.incrementAndGet(); if (emit >= maxMessages) { logger.info("Completing sink."); sink.complete(); break; } } }); }); when(asyncClient.peekBatch(maxMessages)).thenReturn(messages); final IterableStream<ServiceBusReceivedMessage> actual = client.peekBatch(maxMessages); assertNotNull(actual); final List<ServiceBusReceivedMessage> collected = actual.stream().collect(Collectors.toList()); assertEquals(maxMessages, collected.size()); } /** * Verifies that the messages completes when time has elapsed. */ @Test void peekBatchMessagesLessThan() { final int maxMessages = 10; final int returnedMessages = 7; Flux<ServiceBusReceivedMessage> messages = Flux.create(sink -> { final AtomicInteger emittedMessages = new AtomicInteger(); sink.onRequest(number -> { logger.info("Requesting {} messages.", number); if (emittedMessages.get() >= returnedMessages) { logger.info("Completing sink. 
Max: {}", returnedMessages); sink.complete(); return; } for (int i = 0; i < number; i++) { sink.next(mock(ServiceBusReceivedMessage.class)); final int emit = emittedMessages.incrementAndGet(); if (emit >= returnedMessages) { logger.info("Completing sink.", returnedMessages); sink.complete(); break; } } }); }); when(asyncClient.peekBatch(maxMessages)).thenReturn(messages); final IterableStream<ServiceBusReceivedMessage> actual = client.peekBatch(maxMessages); assertNotNull(actual); final long collected = actual.stream().count(); assertEquals(returnedMessages, collected); } /** * Verifies that all requested messages are returned when we can satisfy them all. */ @Test void peekBatchMessagesMaxSequenceNumber() { final int maxMessages = 10; final long sequenceNumber = 100; final Flux<ServiceBusReceivedMessage> messages = Flux.create(sink -> { sink.onRequest(number -> { for (int i = 0; i < maxMessages; i++) { sink.next(mock(ServiceBusReceivedMessage.class)); } sink.complete(); }); }); when(asyncClient.peekBatchAt(maxMessages, sequenceNumber)).thenReturn(messages); final IterableStream<ServiceBusReceivedMessage> actual = client.peekBatchAt(maxMessages, sequenceNumber); assertNotNull(actual); final List<ServiceBusReceivedMessage> collected = actual.stream().collect(Collectors.toList()); assertEquals(maxMessages, collected.size()); } /** * Verifies we cannot pass null value for maxWaitTime while receiving. */ @Test void receiveMessageNullWaitTime() { final int maxMessages = 10; assertThrows(NullPointerException.class, () -> client.receive(maxMessages, (Duration) null)); } /** * Verifies we cannot pass negative value for maxWaitTime while receiving. */ @Test void receiveMessageNegativeWaitTime() { final int maxMessages = 10; Duration negativeReceiveWaitTime = Duration.ofSeconds(-10); assertThrows(IllegalArgumentException.class, () -> client.receive(maxMessages, negativeReceiveWaitTime)); } /** * Verifies that all requested messages are returned when we can satisfy them all. 
*/ @Test void receiveMessagesWithUserSpecifiedTimeout() { final int maxMessages = 10; final int numberToEmit = 5; final Duration receiveTimeout = Duration.ofSeconds(2); final AtomicInteger emittedMessages = new AtomicInteger(); Flux<ServiceBusReceivedMessageContext> messageSink = Flux.create(sink -> { sink.onRequest(e -> { if (emittedMessages.get() >= numberToEmit) { logger.info("Cannot emit more. Reached max already. Emitted: {}. Max: {}", emittedMessages.get(), numberToEmit); return; } for (int i = 0; i < numberToEmit; i++) { ServiceBusReceivedMessageContext context = new ServiceBusReceivedMessageContext( mock(ServiceBusReceivedMessage.class)); sink.next(context); final int emit = emittedMessages.incrementAndGet(); if (emit >= numberToEmit) { logger.info("Cannot emit more. Reached max already. Emitted: {}. Max: {}", emit, maxMessages); break; } } }); sink.onCancel(() -> { logger.info("Cancelled. Completing sink."); sink.complete(); }); }); when(asyncClient.receive()).thenReturn(messageSink); final IterableStream<ServiceBusReceivedMessageContext> actual = client.receive(maxMessages, receiveTimeout); assertNotNull(actual); final long collected = actual.stream().count(); assertEquals(numberToEmit, collected); } /** * Verifies that all requested messages are returned when we can satisfy them all. */ @Test void receiveMessagesMax() { final int maxMessages = 10; final int numberToEmit = maxMessages + 5; Flux<ServiceBusReceivedMessageContext> messageSink = Flux.create(sink -> { sink.onRequest(e -> { final AtomicInteger emittedMessages = new AtomicInteger(); if (emittedMessages.get() >= numberToEmit) { logger.info("Cannot emit more. Reached max already. Emitted: {}. Max: {}", emittedMessages.get(), numberToEmit); return; } for (int i = 0; i < numberToEmit; i++) { sink.next(new ServiceBusReceivedMessageContext(mock(ServiceBusReceivedMessage.class))); final int emit = emittedMessages.incrementAndGet(); if (emit >= numberToEmit) { logger.info("Cannot emit more. 
Reached max already. Emitted: {}. Max: {}", emit, maxMessages); break; } } }); sink.onCancel(() -> { logger.info("Cancelled. Completing sink."); sink.complete(); }); }); when(asyncClient.receive()).thenReturn(messageSink); final IterableStream<ServiceBusReceivedMessageContext> actual = client.receive(maxMessages); assertNotNull(actual); final long collected = actual.stream().count(); assertEquals(maxMessages, collected); } /** * Verifies that all requested messages are returned when we can satisfy them all. */ @Test void receiveMessagesTimeout() { final int maxMessages = 10; final int numberToEmit = 5; final AtomicInteger emittedMessages = new AtomicInteger(); Flux<ServiceBusReceivedMessageContext> messageSink = Flux.create(sink -> { sink.onRequest(e -> { if (emittedMessages.get() >= numberToEmit) { logger.info("Cannot emit more. Reached max already. Emitted: {}. Max: {}", emittedMessages.get(), numberToEmit); return; } for (int i = 0; i < numberToEmit; i++) { sink.next(new ServiceBusReceivedMessageContext(mock(ServiceBusReceivedMessage.class))); final int emit = emittedMessages.incrementAndGet(); if (emit >= numberToEmit) { logger.info("Cannot emit more. Reached max already. Emitted: {}. Max: {}", emit, maxMessages); break; } } }); sink.onCancel(() -> { logger.info("Cancelled. 
Completing sink."); sink.complete(); }); }); when(asyncClient.receive()).thenReturn(messageSink); final IterableStream<ServiceBusReceivedMessageContext> actual = client.receive(maxMessages); assertNotNull(actual); final long collected = actual.stream().count(); assertEquals(numberToEmit, collected); } @Test void receiveDeferredMessage() { final long sequenceNumber = 231412; final ServiceBusReceivedMessage message = mock(ServiceBusReceivedMessage.class); when(asyncClient.receiveDeferredMessage(anyLong())).thenReturn(Mono.just(message)); final ServiceBusReceivedMessage actual = client.receiveDeferredMessage(sequenceNumber); assertEquals(message, actual); verify(asyncClient).receiveDeferredMessage(sequenceNumber); } @Test void receiveDeferredMessageBatch() { final long sequenceNumber = 154; final long sequenceNumber2 = 13124; final ServiceBusReceivedMessage message = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message2 = mock(ServiceBusReceivedMessage.class); when(asyncClient.receiveDeferredMessageBatch(any())).thenReturn(Flux.just(message, message2)); List<Long> collection = Arrays.asList(sequenceNumber, sequenceNumber2); final IterableStream<ServiceBusReceivedMessage> actual = client.receiveDeferredMessageBatch(collection); assertNotNull(actual); final List<ServiceBusReceivedMessage> collected = actual.stream().collect(Collectors.toList()); assertEquals(2, collected.size()); assertEquals(message, collected.get(0)); assertEquals(message2, collected.get(1)); } @Test void renewMessageLock() { final Instant response = Instant.ofEpochSecond(1585259339); when(asyncClient.renewMessageLock(messageLockToken)).thenReturn(Mono.just(response)); final Instant actual = client.renewMessageLock(messageLockToken); assertEquals(response, actual); } @Test void renewSessionLock() { final String sessionId = "a-session-id"; final Instant response = Instant.ofEpochSecond(1585259339); when(asyncClient.renewSessionLock(sessionId)).thenReturn(Mono.just(response)); 
final Instant actual = client.renewSessionLock(sessionId); assertEquals(response, actual); } @Test void setSessionState() { final String sessionId = "a-session-id"; final byte[] contents = new byte[]{10, 111, 23}; when(asyncClient.setSessionState(sessionId, contents)).thenReturn(Mono.empty()); client.setSessionState(sessionId, contents); verify(asyncClient).setSessionState(sessionId, contents); } private static boolean lockTokenEquals(String compared) { return compared != null && LOCK_TOKEN.equals(compared); } }
class ServiceBusReceiverClientTest { private static final String NAMESPACE = "test-namespace"; private static final String ENTITY_PATH = "test-entity-path"; private static final String LOCK_TOKEN = UUID.randomUUID().toString(); private static final Duration OPERATION_TIMEOUT = Duration.ofSeconds(5); private final ClientLogger logger = new ClientLogger(ServiceBusReceiverClientTest.class); private Duration maxAutoLockRenewalDuration; private ServiceBusReceiverClient client; @Mock private ServiceBusReceiverAsyncClient asyncClient; @Mock private Map<String, Object> propertiesToModify; @Mock ServiceBusTransactionContext transactionContext; @BeforeEach @AfterEach void teardown() { Mockito.framework().clearInlineMocks(); } @Test void nullConstructor() { assertThrows(NullPointerException.class, () -> new ServiceBusReceiverClient(null, OPERATION_TIMEOUT)); assertThrows(NullPointerException.class, () -> new ServiceBusReceiverClient(asyncClient, null)); } @Test void properties() { assertEquals(NAMESPACE, client.getFullyQualifiedNamespace()); assertEquals(ENTITY_PATH, client.getEntityPath()); } @Test void abandonMessageWithTransaction() { when(asyncClient.abandon(eq(LOCK_TOKEN), isNull(), any(ServiceBusTransactionContext.class))).thenReturn(Mono.empty()); client.abandon(LOCK_TOKEN, null, transactionContext); verify(asyncClient).abandon(argThat(ServiceBusReceiverClientTest::lockTokenEquals), isNull(), eq(transactionContext)); } @Test void abandonMessage() { when(asyncClient.abandon(eq(LOCK_TOKEN))).thenReturn(Mono.empty()); client.abandon(LOCK_TOKEN); verify(asyncClient).abandon(argThat(ServiceBusReceiverClientTest::lockTokenEquals)); } @Test void abandonMessageWithProperties() { when(asyncClient.abandon(eq(LOCK_TOKEN), anyMap())).thenReturn(Mono.empty()); when(asyncClient.abandon(eq(LOCK_TOKEN), any(), anyString())).thenReturn(Mono.empty()); client.abandon(LOCK_TOKEN, propertiesToModify); verify(asyncClient).abandon(argThat(ServiceBusReceiverClientTest::lockTokenEquals), 
eq(propertiesToModify)); } @Test void completeMessageWithTransaction() { when(asyncClient.complete(eq(LOCK_TOKEN), any(ServiceBusTransactionContext.class))).thenReturn(Mono.empty()); client.complete(LOCK_TOKEN, transactionContext); verify(asyncClient).complete(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(transactionContext)); } @Test void completeMessage() { when(asyncClient.complete(eq(LOCK_TOKEN))).thenReturn(Mono.empty()); client.complete(LOCK_TOKEN); verify(asyncClient).complete(argThat(ServiceBusReceiverClientTest::lockTokenEquals)); } @Test void deferMessage() { when(asyncClient.defer(eq(LOCK_TOKEN))).thenReturn(Mono.empty()); client.defer(LOCK_TOKEN); verify(asyncClient).defer(argThat(ServiceBusReceiverClientTest::lockTokenEquals)); } @Test void deferMessageWithPropertiesWithTransaction() { when(asyncClient.defer(eq(LOCK_TOKEN), anyMap(), any(ServiceBusTransactionContext.class))).thenReturn(Mono.empty()); when(asyncClient.defer(eq(LOCK_TOKEN), any(), anyString(), any(ServiceBusTransactionContext.class))).thenReturn(Mono.empty()); client.defer(LOCK_TOKEN, propertiesToModify, transactionContext); verify(asyncClient).defer(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(propertiesToModify), eq(transactionContext)); } @Test void deferMessageWithProperties() { when(asyncClient.defer(eq(LOCK_TOKEN), anyMap())).thenReturn(Mono.empty()); when(asyncClient.defer(eq(LOCK_TOKEN), any(), anyString())).thenReturn(Mono.empty()); client.defer(LOCK_TOKEN, propertiesToModify); verify(asyncClient).defer(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(propertiesToModify)); } @Test void deadLetterMessage() { when(asyncClient.deadLetter(eq(LOCK_TOKEN))).thenReturn(Mono.empty()); client.deadLetter(LOCK_TOKEN); verify(asyncClient).deadLetter(argThat(ServiceBusReceiverClientTest::lockTokenEquals)); } @Test void deadLetterMessageWithOptionsWithTransaction() { final DeadLetterOptions options = new DeadLetterOptions() 
.setDeadLetterErrorDescription("foo") .setDeadLetterReason("bar") .setPropertiesToModify(propertiesToModify); when(asyncClient.deadLetter(eq(LOCK_TOKEN), any(DeadLetterOptions.class), any(ServiceBusTransactionContext.class))) .thenReturn(Mono.empty()); client.deadLetter(LOCK_TOKEN, options, transactionContext); verify(asyncClient).deadLetter(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(options), eq(transactionContext)); } @Test void deadLetterMessageWithOptions() { final DeadLetterOptions options = new DeadLetterOptions() .setDeadLetterErrorDescription("foo") .setDeadLetterReason("bar") .setPropertiesToModify(propertiesToModify); when(asyncClient.deadLetter(eq(LOCK_TOKEN), any(DeadLetterOptions.class))) .thenReturn(Mono.empty()); client.deadLetter(LOCK_TOKEN, options); verify(asyncClient).deadLetter(argThat(ServiceBusReceiverClientTest::lockTokenEquals), eq(options)); } @Test void getSessionState() { final String sessionId = "a-session-id"; final byte[] contents = new byte[]{10, 111, 23}; when(asyncClient.getSessionState(sessionId)).thenReturn(Mono.just(contents)); final byte[] actual = client.getSessionState(sessionId); assertEquals(contents, actual); } @Test void getSessionStateNull() { final String sessionId = "a-session-id"; when(asyncClient.getSessionState(sessionId)).thenReturn(Mono.empty()); final byte[] actual = client.getSessionState(sessionId); assertNull(actual); } @Test void peekMessage() { final ServiceBusReceivedMessage message = mock(ServiceBusReceivedMessage.class); when(asyncClient.peek()).thenReturn(Mono.just(message)); final ServiceBusReceivedMessage actual = client.peek(); assertEquals(message, actual); } @Test void peekMessageFromSequence() { final long sequenceNumber = 154; final ServiceBusReceivedMessage message = mock(ServiceBusReceivedMessage.class); when(asyncClient.peekAt(sequenceNumber)).thenReturn(Mono.just(message)); final ServiceBusReceivedMessage actual = client.peekAt(sequenceNumber); assertEquals(message, actual); } 
/** * Verifies that all requested messages are returned when we can satisfy them all. */ @Test void peekBatchMessagesMax() { final int maxMessages = 10; Flux<ServiceBusReceivedMessage> messages = Flux.create(sink -> { final AtomicInteger emittedMessages = new AtomicInteger(); sink.onRequest(number -> { logger.info("Requesting {} messages.", number); if (emittedMessages.get() >= maxMessages) { logger.info("Completing sink."); sink.complete(); return; } for (int i = 0; i < number; i++) { sink.next(mock(ServiceBusReceivedMessage.class)); final int emit = emittedMessages.incrementAndGet(); if (emit >= maxMessages) { logger.info("Completing sink."); sink.complete(); break; } } }); }); when(asyncClient.peekBatch(maxMessages)).thenReturn(messages); final IterableStream<ServiceBusReceivedMessage> actual = client.peekBatch(maxMessages); assertNotNull(actual); final List<ServiceBusReceivedMessage> collected = actual.stream().collect(Collectors.toList()); assertEquals(maxMessages, collected.size()); } /** * Verifies that the messages completes when time has elapsed. */ @Test void peekBatchMessagesLessThan() { final int maxMessages = 10; final int returnedMessages = 7; Flux<ServiceBusReceivedMessage> messages = Flux.create(sink -> { final AtomicInteger emittedMessages = new AtomicInteger(); sink.onRequest(number -> { logger.info("Requesting {} messages.", number); if (emittedMessages.get() >= returnedMessages) { logger.info("Completing sink. 
Max: {}", returnedMessages); sink.complete(); return; } for (int i = 0; i < number; i++) { sink.next(mock(ServiceBusReceivedMessage.class)); final int emit = emittedMessages.incrementAndGet(); if (emit >= returnedMessages) { logger.info("Completing sink.", returnedMessages); sink.complete(); break; } } }); }); when(asyncClient.peekBatch(maxMessages)).thenReturn(messages); final IterableStream<ServiceBusReceivedMessage> actual = client.peekBatch(maxMessages); assertNotNull(actual); final long collected = actual.stream().count(); assertEquals(returnedMessages, collected); } /** * Verifies that all requested messages are returned when we can satisfy them all. */ @Test void peekBatchMessagesMaxSequenceNumber() { final int maxMessages = 10; final long sequenceNumber = 100; final Flux<ServiceBusReceivedMessage> messages = Flux.create(sink -> { sink.onRequest(number -> { for (int i = 0; i < maxMessages; i++) { sink.next(mock(ServiceBusReceivedMessage.class)); } sink.complete(); }); }); when(asyncClient.peekBatchAt(maxMessages, sequenceNumber)).thenReturn(messages); final IterableStream<ServiceBusReceivedMessage> actual = client.peekBatchAt(maxMessages, sequenceNumber); assertNotNull(actual); final List<ServiceBusReceivedMessage> collected = actual.stream().collect(Collectors.toList()); assertEquals(maxMessages, collected.size()); } /** * Verifies we cannot pass null value for maxWaitTime while receiving. */ @Test void receiveMessageNullWaitTime() { final int maxMessages = 10; assertThrows(NullPointerException.class, () -> client.receive(maxMessages, (Duration) null)); } /** * Verifies we cannot pass negative value for maxWaitTime while receiving. */ @Test void receiveMessageNegativeWaitTime() { final int maxMessages = 10; Duration negativeReceiveWaitTime = Duration.ofSeconds(-10); assertThrows(IllegalArgumentException.class, () -> client.receive(maxMessages, negativeReceiveWaitTime)); } /** * Verifies that all requested messages are returned when we can satisfy them all. 
*/ @Test void receiveMessagesWithUserSpecifiedTimeout() { final int maxMessages = 10; final int numberToEmit = 5; final Duration receiveTimeout = Duration.ofSeconds(2); final AtomicInteger emittedMessages = new AtomicInteger(); Flux<ServiceBusReceivedMessageContext> messageSink = Flux.create(sink -> { sink.onRequest(e -> { if (emittedMessages.get() >= numberToEmit) { logger.info("Cannot emit more. Reached max already. Emitted: {}. Max: {}", emittedMessages.get(), numberToEmit); return; } for (int i = 0; i < numberToEmit; i++) { ServiceBusReceivedMessageContext context = new ServiceBusReceivedMessageContext( mock(ServiceBusReceivedMessage.class)); sink.next(context); final int emit = emittedMessages.incrementAndGet(); if (emit >= numberToEmit) { logger.info("Cannot emit more. Reached max already. Emitted: {}. Max: {}", emit, maxMessages); break; } } }); sink.onCancel(() -> { logger.info("Cancelled. Completing sink."); sink.complete(); }); }); when(asyncClient.receive()).thenReturn(messageSink); final IterableStream<ServiceBusReceivedMessageContext> actual = client.receive(maxMessages, receiveTimeout); assertNotNull(actual); final long collected = actual.stream().count(); assertEquals(numberToEmit, collected); } /** * Verifies that all requested messages are returned when we can satisfy them all. */ @Test void receiveMessagesMax() { final int maxMessages = 10; final int numberToEmit = maxMessages + 5; Flux<ServiceBusReceivedMessageContext> messageSink = Flux.create(sink -> { sink.onRequest(e -> { final AtomicInteger emittedMessages = new AtomicInteger(); if (emittedMessages.get() >= numberToEmit) { logger.info("Cannot emit more. Reached max already. Emitted: {}. Max: {}", emittedMessages.get(), numberToEmit); return; } for (int i = 0; i < numberToEmit; i++) { sink.next(new ServiceBusReceivedMessageContext(mock(ServiceBusReceivedMessage.class))); final int emit = emittedMessages.incrementAndGet(); if (emit >= numberToEmit) { logger.info("Cannot emit more. 
Reached max already. Emitted: {}. Max: {}", emit, maxMessages); break; } } }); sink.onCancel(() -> { logger.info("Cancelled. Completing sink."); sink.complete(); }); }); when(asyncClient.receive()).thenReturn(messageSink); final IterableStream<ServiceBusReceivedMessageContext> actual = client.receive(maxMessages); assertNotNull(actual); final long collected = actual.stream().count(); assertEquals(maxMessages, collected); } /** * Verifies that all requested messages are returned when we can satisfy them all. */ @Test void receiveMessagesTimeout() { final int maxMessages = 10; final int numberToEmit = 5; final AtomicInteger emittedMessages = new AtomicInteger(); Flux<ServiceBusReceivedMessageContext> messageSink = Flux.create(sink -> { sink.onRequest(e -> { if (emittedMessages.get() >= numberToEmit) { logger.info("Cannot emit more. Reached max already. Emitted: {}. Max: {}", emittedMessages.get(), numberToEmit); return; } for (int i = 0; i < numberToEmit; i++) { sink.next(new ServiceBusReceivedMessageContext(mock(ServiceBusReceivedMessage.class))); final int emit = emittedMessages.incrementAndGet(); if (emit >= numberToEmit) { logger.info("Cannot emit more. Reached max already. Emitted: {}. Max: {}", emit, maxMessages); break; } } }); sink.onCancel(() -> { logger.info("Cancelled. 
Completing sink."); sink.complete(); }); }); when(asyncClient.receive()).thenReturn(messageSink); final IterableStream<ServiceBusReceivedMessageContext> actual = client.receive(maxMessages); assertNotNull(actual); final long collected = actual.stream().count(); assertEquals(numberToEmit, collected); } @Test void receiveDeferredMessage() { final long sequenceNumber = 231412; final ServiceBusReceivedMessage message = mock(ServiceBusReceivedMessage.class); when(asyncClient.receiveDeferredMessage(anyLong())).thenReturn(Mono.just(message)); final ServiceBusReceivedMessage actual = client.receiveDeferredMessage(sequenceNumber); assertEquals(message, actual); verify(asyncClient).receiveDeferredMessage(sequenceNumber); } @Test void receiveDeferredMessageBatch() { final long sequenceNumber = 154; final long sequenceNumber2 = 13124; final ServiceBusReceivedMessage message = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message2 = mock(ServiceBusReceivedMessage.class); when(asyncClient.receiveDeferredMessageBatch(any())).thenReturn(Flux.just(message, message2)); List<Long> collection = Arrays.asList(sequenceNumber, sequenceNumber2); final IterableStream<ServiceBusReceivedMessage> actual = client.receiveDeferredMessageBatch(collection); assertNotNull(actual); final List<ServiceBusReceivedMessage> collected = actual.stream().collect(Collectors.toList()); assertEquals(2, collected.size()); assertEquals(message, collected.get(0)); assertEquals(message2, collected.get(1)); } @Test void renewMessageLock() { final Instant response = Instant.ofEpochSecond(1585259339); when(asyncClient.renewMessageLock(LOCK_TOKEN)).thenReturn(Mono.just(response)); final Instant actual = client.renewMessageLock(LOCK_TOKEN); assertEquals(response, actual); } @Test void renewSessionLock() { final String sessionId = "a-session-id"; final Instant response = Instant.ofEpochSecond(1585259339); when(asyncClient.renewSessionLock(sessionId)).thenReturn(Mono.just(response)); final 
Instant actual = client.renewSessionLock(sessionId); assertEquals(response, actual); } @Test void setSessionState() { final String sessionId = "a-session-id"; final byte[] contents = new byte[]{10, 111, 23}; when(asyncClient.setSessionState(sessionId, contents)).thenReturn(Mono.empty()); client.setSessionState(sessionId, contents); verify(asyncClient).setSessionState(sessionId, contents); } private static boolean lockTokenEquals(String compared) { return compared != null && LOCK_TOKEN.equals(compared); } }
I see, good point, changed to following: new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e)
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Exception for getting hash for type %s: %s ", type1.toString(), e.getMessage())); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
throw new IllegalStateException(String.format("Exception for getting hash for type %s: %s ", type1.toString(), e.getMessage()));
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
I don't think this exception message makes sense, since there is no `type1` in this try catch block.
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e);
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
hmmm, we could remove the type from the exception message, but thought it might give some hint which type is having hash issue?
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e);
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
why only type1 and not type2 ? Also, what does type1.toString() represents ?
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e);
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
I think with Java samples we want to start showing the easier way of doing things which are usually with higher versions of the Java language. But since our support needs to be catered to atleast Java 8 we show the Java 8 way and the other way is commented.
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, 
formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, 
formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
since we can only reach to this step when type1 equals type2, so I think either should work. it will tell us whether it is objectNode or arrayNode. (The name of the ItemType enum)
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e);
public int compare(Object obj1, Object obj2) { ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); int cmp = Integer.compare(type1.getVal(), type2.getVal()); if (cmp != 0) { return cmp; } switch (type1) { case NoValue: case Null: return 0; case Boolean: return Boolean.compare((Boolean) obj1, (Boolean) obj2); case Number: return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); case String: return ((String) obj1).compareTo((String) obj2); case ArrayNode: case ObjectNode: try{ UInt128 hash1 = DistinctHash.getHash(obj1); UInt128 hash2 = DistinctHash.getHash(obj2); return hash1.compareTo(hash2); } catch (IOException e) { throw new IllegalStateException(String.format("Getting hash exception for type %s ", type1.toString()), e); } default: throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); } }
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
class SingletonHelper { private static final ItemComparator INSTANCE = new ItemComparator(); }
nit: weird formatting here
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresentOrElse( formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue()), () -> System.out.println("'Merchant' training-time label does not exist. Substitute it with " + "your own training-time label.") ); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), 
point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresentOrElse( formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue()), () -> System.out.println("'Vendor Name:' label text does not exist") ); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, 
formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
same here
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresentOrElse( formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue()), () -> System.out.println("'Merchant' training-time label does not exist. Substitute it with " + "your own training-time label.") ); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), 
point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresentOrElse( formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue()), () -> System.out.println("'Vendor Name:' label text does not exist") ); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, 
formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
Could be the diff mode, https://github.com/Azure/azure-sdk-for-java/blob/4364c78509af8849ca38fed9c030d9608ad5e13b/sdk/formrecognizer/azure-ai-formrecognizer/src/samples/java/com/azure/ai/formrecognizer/AdvancedDiffLabeledUnlabeledDataAsync.java#L94
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresentOrElse( formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue()), () -> System.out.println("'Merchant' training-time label does not exist. Substitute it with " + "your own training-time label.") ); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), 
point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresentOrElse( formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue()), () -> System.out.println("'Vendor Name:' label text does not exist") ); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, 
formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
// NOTE(review): only the class javadoc is visible in this record; the documented main(String[])
// entry point appears as a separate record in this sample set — the class body here is intentionally empty.
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
// NOTE(review): duplicate class shell (dataset context field); the main(String[]) method it
// documents is stored in a separate record.
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
Weird! Okay, ignore me. :)
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresentOrElse( formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue()), () -> System.out.println("'Merchant' training-time label does not exist. Substitute it with " + "your own training-time label.") ); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), 
point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresentOrElse( formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue()), () -> System.out.println("'Vendor Name:' label text does not exist") ); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, 
formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
// NOTE(review): duplicate class shell (dataset context field); the main(String[]) method it
// documents is stored in a separate record.
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
// NOTE(review): duplicate class shell (dataset context field); the main(String[]) method it
// documents is stored in a separate record.
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
Remove this?
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, 
formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
public static void main(String[] args) throws IOException { FormRecognizerAsyncClient client = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildAsyncClient(); File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/" + "forms/Form_1.jpg"); byte[] fileContent = Files.readAllBytes(analyzeFile.toPath()); PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller = client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions( toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}") .setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true) .setPollInterval(Duration.ofSeconds(5))); PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller = client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF); Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller .last() .flatMap(trainingOperationResponse -> { if (trainingOperationResponse.getStatus().isComplete()) { return trainingOperationResponse.getFinalResult(); } else { return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:" + trainingOperationResponse.getStatus())); } }); System.out.println("--------Recognizing forms with labeled custom model--------"); labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm -> labeledForm.getFields().forEach((label, formField) 
-> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f," + " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score " + "of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); System.out.println("Value for a specific labeled field using the training-time label:"); labeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println("-----------------------------------------------------------"); System.out.println("-------Recognizing forms with unlabeled custom model-------"); unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm -> unLabeledForm.getFields().forEach((label, formField) -> { final StringBuilder boundingBoxStr = new StringBuilder(); if (formField.getValueText().getBoundingBox() != null) { formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } final StringBuilder boundingBoxLabelStr = new StringBuilder(); if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) { formField.getLabelText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append); } System.out.printf("Field %s has label %s within bounding box %s with a confidence score " + "of %.2f.%n", label, 
formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence()); System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence " + "score of %.2f.%n", label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr, formField.getConfidence()); unLabeledForm.getFields().entrySet() .stream() .filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText())) .findAny() .ifPresent(formFieldEntry -> System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue())); }))); try { TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { e.printStackTrace(); } }
// NOTE(review): duplicate class shell (dataset context field); the main(String[]) method it
// documents is stored in a separate record.
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
// NOTE(review): duplicate class shell (dataset context field); the main(String[]) method it
// documents is stored in a separate record.
class AdvancedDiffLabeledUnlabeledDataAsync { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
Could you show an example of how to set the request ID?
/**
 * Demonstrates autocomplete in ONE_TERM_WITH_CONTEXT mode: the last term of the search
 * text is completed using the preceding terms as context.
 */
private static void autoCompleteWithOneTermContext(SearchClient searchClient) {
    // Configure the mode; all other options keep their defaults.
    AutocompleteOptions options = new AutocompleteOptions()
        .setAutocompleteMode(AutocompleteMode.ONE_TERM_WITH_CONTEXT);

    PagedIterableBase<AutocompleteItem, AutocompletePagedResponse> suggestions =
        searchClient.autocomplete("coffee m", "sg", options, Context.NONE);

    System.out.println("Received results with one term context:");
    for (AutocompleteItem suggestion : suggestions) {
        System.out.println(suggestion.getText());
    }

    /* Output:
     * Received results with one term context:
     * coffee maker
     */
}
PagedIterableBase<AutocompleteItem, AutocompletePagedResponse> results = searchClient.autocomplete("coffee m",
// Demonstrates autocomplete in ONE_TERM_WITH_CONTEXT mode: completes the last term of the search
// text ("coffee m") using the preceding terms as context, then prints each suggestion's text.
private static void autoCompleteWithOneTermContext(SearchClient searchClient) { AutocompleteOptions params = new AutocompleteOptions().setAutocompleteMode( AutocompleteMode.ONE_TERM_WITH_CONTEXT); PagedIterableBase<AutocompleteItem, AutocompletePagedResponse> results = searchClient.autocomplete("coffee m", "sg", params, Context.NONE); System.out.println("Received results with one term context:"); results.forEach(result -> System.out.println(result.getText())); /* Output: * Received results with one term context: * coffee maker */ }
// Sample class showing three autocomplete variants against the "hotels-sample-index" index:
// one-term-with-context, highlighting, and filter+fuzzy. Endpoint and API key are read from the
// AZURE_COGNITIVE_SEARCH_ENDPOINT / AZURE_COGNITIVE_SEARCH_API_KEY environment variables.
// NOTE(review): main() also calls autoCompleteWithOneTermContext, defined in a sibling record.
class AutoCompleteExample { /** * From the Azure portal, get your Azure Cognitive Search service URL and API key, * and set the values of these environment variables: */ private static final String ENDPOINT = Configuration.getGlobalConfiguration().get("AZURE_COGNITIVE_SEARCH_ENDPOINT"); private static final String API_KEY = Configuration.getGlobalConfiguration().get("AZURE_COGNITIVE_SEARCH_API_KEY"); public static void main(String[] args) { SearchClient searchClient = new SearchClientBuilder() .endpoint(ENDPOINT) .credential(new AzureKeyCredential(API_KEY)) .indexName("hotels-sample-index") .buildClient(); autoCompleteWithOneTermContext(searchClient); autoCompleteWithHighlighting(searchClient); autoCompleteWithFilterAndFuzzy(searchClient); } private static void autoCompleteWithHighlighting(SearchClient searchClient) { AutocompleteOptions params = new AutocompleteOptions() .setAutocompleteMode(AutocompleteMode.ONE_TERM) .setFilter("Address/City eq 'San Diego' or Address/City eq 'Hartford'") .setHighlightPreTag("<b>") .setHighlightPostTag("</b>"); PagedIterableBase<AutocompleteItem, AutocompletePagedResponse> results = searchClient.autocomplete("co", "sg", params, Context.NONE); System.out.println("Received results with highlighting:"); results.forEach(result -> System.out.println(result.getText())); /* Output: * Received results with highlighting: * coffee */ } private static void autoCompleteWithFilterAndFuzzy(SearchClient searchClient) { AutocompleteOptions params = new AutocompleteOptions() .setAutocompleteMode(AutocompleteMode.ONE_TERM) .setUseFuzzyMatching(true) .setFilter("HotelId ne '6' and Category eq 'Budget'"); PagedIterableBase<AutocompleteItem, AutocompletePagedResponse> results = searchClient.autocomplete("su", "sg", params, Context.NONE); System.out.println("Received results with filter and fuzzy:"); results.forEach(result -> System.out.println(result.getText())); /* Output: * Received results with filter and fuzzy: * suite */ } }
// Duplicate of the AutoCompleteExample sample class (dataset context field): demonstrates
// autocomplete with highlighting and with filter+fuzzy matching against "hotels-sample-index".
class AutoCompleteExample { /** * From the Azure portal, get your Azure Cognitive Search service URL and API key, * and set the values of these environment variables: */ private static final String ENDPOINT = Configuration.getGlobalConfiguration().get("AZURE_COGNITIVE_SEARCH_ENDPOINT"); private static final String API_KEY = Configuration.getGlobalConfiguration().get("AZURE_COGNITIVE_SEARCH_API_KEY"); public static void main(String[] args) { SearchClient searchClient = new SearchClientBuilder() .endpoint(ENDPOINT) .credential(new AzureKeyCredential(API_KEY)) .indexName("hotels-sample-index") .buildClient(); autoCompleteWithOneTermContext(searchClient); autoCompleteWithHighlighting(searchClient); autoCompleteWithFilterAndFuzzy(searchClient); } private static void autoCompleteWithHighlighting(SearchClient searchClient) { AutocompleteOptions params = new AutocompleteOptions() .setAutocompleteMode(AutocompleteMode.ONE_TERM) .setFilter("Address/City eq 'San Diego' or Address/City eq 'Hartford'") .setHighlightPreTag("<b>") .setHighlightPostTag("</b>"); PagedIterableBase<AutocompleteItem, AutocompletePagedResponse> results = searchClient.autocomplete("co", "sg", params, Context.NONE); System.out.println("Received results with highlighting:"); results.forEach(result -> System.out.println(result.getText())); /* Output: * Received results with highlighting: * coffee */ } private static void autoCompleteWithFilterAndFuzzy(SearchClient searchClient) { AutocompleteOptions params = new AutocompleteOptions() .setAutocompleteMode(AutocompleteMode.ONE_TERM) .setUseFuzzyMatching(true) .setFilter("HotelId ne '6' and Category eq 'Budget'"); PagedIterableBase<AutocompleteItem, AutocompletePagedResponse> results = searchClient.autocomplete("su", "sg", params, Context.NONE); System.out.println("Received results with filter and fuzzy:"); results.forEach(result -> System.out.println(result.getText())); /* Output: * Received results with filter and fuzzy: * suite */ } }